From 0ff2f910f2f78238be24054acf28b9d54133c29a Mon Sep 17 00:00:00 2001 From: sugarme Date: Wed, 22 Jul 2020 15:56:30 +1000 Subject: [PATCH] BREAKING CHANGE: switch to auto-generated --- .gitignore | 1 + example/cifar/main.go | 2 +- example/mnist/cnn.go | 2 +- example/mnist/linear.go | 22 +- example/mnist/nn.go | 4 +- example/neural-style-transfer/main.go | 16 +- .../main.go | 0 example/tensor-grad/main.go | 6 +- example/transfer-learning/main.go | 2 +- example/yolo/darknet.go | 8 +- example/yolo/main.go | 4 +- gen/gen.ml | 702 +- .../pytorch/Declarations-v1.4.0.yaml | 0 .../pytorch/Declarations-v1.5.0.yaml | 0 gen/{third_party => }/pytorch/LICENSE | 0 gen/{third_party => }/pytorch/README | 0 libtch/c-generated-sample.go | 732 - libtch/c-generated.go | 5487 +++++++ nn/conv-transpose.go | 6 +- nn/conv.go | 12 +- nn/init.go | 14 +- nn/linear.go | 6 +- nn/rnn.go | 8 +- nn/sequential.go | 4 +- nn/varstore.go | 22 +- nn/varstore_test.go | 12 +- tensor/jit_test.go | 2 +- tensor/macro.go | 3 - tensor/must-tensor-generated.go | 8043 ++++++++++ tensor/other.go | 12 +- tensor/patch.go | 154 + tensor/tensor-generated-sample.go | 2270 --- tensor/tensor-generated.go | 13035 ++++++++++++++++ tensor/tensor.go | 65 +- tensor/tensor.go1 | 35 + tensor/tensor_test.go | 22 +- vision/alexnet.go | 8 +- vision/cifar.go | 8 +- vision/dataset.go | 4 +- vision/densenet.go | 6 +- vision/efficientnet.go | 6 +- vision/imagenet.go | 16 +- vision/inception.go | 28 +- vision/mobilenet.go | 2 +- vision/resnet.go | 16 +- vision/squeezenet.go | 8 +- vision/vgg.go | 4 +- 47 files changed, 27449 insertions(+), 3370 deletions(-) rename example/{pretrained-models => pretrained-model}/main.go (100%) rename gen/{third_party => }/pytorch/Declarations-v1.4.0.yaml (100%) rename gen/{third_party => }/pytorch/Declarations-v1.5.0.yaml (100%) rename gen/{third_party => }/pytorch/LICENSE (100%) rename gen/{third_party => }/pytorch/README (100%) delete mode 100644 libtch/c-generated-sample.go create mode 100644 libtch/c-generated.go delete mode 100644 tensor/macro.go create mode 100644 tensor/must-tensor-generated.go create mode 100644 tensor/patch.go delete mode 100644 tensor/tensor-generated-sample.go create mode 100644 tensor/tensor-generated.go create mode 100644 tensor/tensor.go1 diff --git a/.gitignore b/.gitignore index e64d9af..1558fac 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ _build/ data/ example/testdata/ tmp/ +bak/ gen/.merlin **/*.rs.bk Cargo.lock diff --git a/example/cifar/main.go b/example/cifar/main.go index e8fe5db..f7064ca 100644 --- a/example/cifar/main.go +++ b/example/cifar/main.go @@ -143,7 +143,7 @@ func main() { loss := logits.CrossEntropyForLogits(devicedLabel) opt.BackwardStep(loss) - lossVal = loss.Values()[0] + lossVal = loss.Float64Values()[0] devicedData.MustDrop() devicedLabel.MustDrop() diff --git a/example/mnist/cnn.go b/example/mnist/cnn.go index 2d16f55..760c79e 100644 --- a/example/mnist/cnn.go +++ b/example/mnist/cnn.go @@ -133,7 +133,7 @@ func runCNN1() { vs.Freeze() testAccuracy := nn.BatchAccuracyForLogits(vs, net, testImages, testLabels, vs.Device(), 1024) vs.Unfreeze() - fmt.Printf("Epoch: %v\t Loss: %.2f \t Test accuracy: %.2f%%\n", epoch, epocLoss.Values()[0], testAccuracy*100.0) + fmt.Printf("Epoch: %v\t Loss: %.2f \t Test accuracy: %.2f%%\n", epoch, epocLoss.Float64Values()[0], testAccuracy*100.0) if testAccuracy > bestAccuracy { bestAccuracy = testAccuracy } diff --git a/example/mnist/linear.go b/example/mnist/linear.go index f30e92d..61e492a 100644 --- 
a/example/mnist/linear.go +++ b/example/mnist/linear.go @@ -20,30 +20,34 @@ func runLinear() { var ds vision.Dataset ds = vision.LoadMNISTDir(MnistDir) - device := (gotch.CPU).CInt() - dtype := (gotch.Float).CInt() + device := gotch.CPU + dtype := gotch.Float - ws := ts.MustZeros([]int64{ImageDim, Label}, dtype, device).MustSetRequiresGrad(true) - bs := ts.MustZeros([]int64{Label}, dtype, device).MustSetRequiresGrad(true) + ws := ts.MustZeros([]int64{ImageDim, Label}, dtype, device).MustSetRequiresGrad(true, false) + bs := ts.MustZeros([]int64{Label}, dtype, device).MustSetRequiresGrad(true, false) for epoch := 0; epoch < epochs; epoch++ { + weight := ts.NewTensor() + reduction := int64(1) // Mean of loss + ignoreIndex := int64(-100) + logits := ds.TrainImages.MustMm(ws, false).MustAdd(bs, true) - loss := logits.MustLogSoftmax(-1, dtype, true).MustNllLoss(ds.TrainLabels, true) + loss := logits.MustLogSoftmax(-1, dtype, true).MustNllLoss(ds.TrainLabels, weight, reduction, ignoreIndex, true) ws.ZeroGrad() bs.ZeroGrad() loss.MustBackward() ts.NoGrad(func() { - ws.Add_(ws.MustGrad().MustMul1(ts.FloatScalar(-1.0), true)) - bs.Add_(bs.MustGrad().MustMul1(ts.FloatScalar(-1.0), true)) + ws.Add_(ws.MustGrad(false).MustMul1(ts.FloatScalar(-1.0), true)) + bs.Add_(bs.MustGrad(false).MustMul1(ts.FloatScalar(-1.0), true)) }) testLogits := ds.TestImages.MustMm(ws, false).MustAdd(bs, true) - testAccuracy := testLogits.MustArgmax(-1, false, true).MustEq1(ds.TestLabels, true).MustTotype(gotch.Float, true).MustMean(gotch.Float.CInt(), true).MustView([]int64{-1}, true).MustFloat64Value([]int64{0}) + testAccuracy := testLogits.MustArgmax(-1, false, true).MustEq1(ds.TestLabels, true).MustTotype(gotch.Float, true).MustMean(gotch.Float, true).MustView([]int64{-1}, true).MustFloat64Value([]int64{0}) - fmt.Printf("Epoch: %v - Loss: %.3f - Test accuracy: %.2f%%\n", epoch, loss.Values()[0], testAccuracy*100) + fmt.Printf("Epoch: %v - Loss: %.3f - Test accuracy: %.2f%%\n", epoch, loss.Float64Values()[0], testAccuracy*100) loss.MustDrop() } diff --git a/example/mnist/nn.go b/example/mnist/nn.go index 199774c..66b2217 100644 --- a/example/mnist/nn.go +++ b/example/mnist/nn.go @@ -46,9 +46,9 @@ func train(trainX, trainY, testX, testY ts.Tensor, m ts.Module, opt nn.Optimizer testLogits := m.Forward(testX) testAccuracy := testLogits.AccuracyForLogits(testY) - accuracy := testAccuracy.Values()[0] * 100 + accuracy := testAccuracy.Float64Values()[0] * 100 testAccuracy.MustDrop() - lossVal := loss.Values()[0] + lossVal := loss.Float64Values()[0] loss.MustDrop() fmt.Printf("Epoch: %v \t Loss: %.3f \t Test accuracy: %.2f%%\n", epoch, lossVal, accuracy) diff --git a/example/neural-style-transfer/main.go b/example/neural-style-transfer/main.go index d5fbfdb..efe83e9 100644 --- a/example/neural-style-transfer/main.go +++ b/example/neural-style-transfer/main.go @@ -46,7 +46,7 @@ func gramMatrix(m ts.Tensor) (retVal ts.Tensor) { mview := m.MustView([]int64{a * b, c * d}, false) mviewT := mview.MustT(false) - gram := mview.MustMatMul(mviewT, true) + gram := mview.MustMatmul(mviewT, true) mviewT.MustDrop() return gram.MustDiv1(ts.IntScalar(a*b*c*d), true) @@ -57,7 +57,7 @@ func styleLoss(m1 ts.Tensor, m2 ts.Tensor) (retVal ts.Tensor) { // m1.MustDrop() gram2 := gramMatrix(m2) // m2.MustDrop() - loss := gram1.MustMseLoss(gram2, ts.ReductionMean.ToInt(), true) + loss := gram1.MustMseLoss(gram2, int64(ts.ReductionMean), true) gram2.MustDrop() return loss } @@ -89,8 +89,8 @@ func main() { cuda := gotch.CudaBuilder(0) device := 
cuda.CudaIfAvailable() - // device := gotch.CPU + netVS := nn.NewVarStore(device) in := vision.NewImageNet() net := vision.VGG16(netVS.Root(), in.ClassCount()) @@ -150,8 +150,8 @@ func main() { inputLayers := net.ForwardAllT(inputVar, false, maxLayer) // var sLoss ts.Tensor - sLoss := ts.MustZeros([]int64{1}, gotch.Float.CInt(), device.CInt()) - cLoss := ts.MustZeros([]int64{1}, gotch.Float.CInt(), device.CInt()) + sLoss := ts.MustZeros([]int64{1}, gotch.Float, device) + cLoss := ts.MustZeros([]int64{1}, gotch.Float, device) for _, idx := range StyleIndexes { l := styleLoss(inputLayers[idx], styleLayers[idx]) sLoss = sLoss.MustAdd(l, true) @@ -159,7 +159,7 @@ func main() { } for _, idx := range ContentIndexes { // NOTE: set `del` = true called panic at GPU train (tested on Colab) - l := inputLayers[idx].MustMseLoss(contentLayers[idx], ts.ReductionMean.ToInt(), false) + l := inputLayers[idx].MustMseLoss(contentLayers[idx], int64(ts.ReductionMean), false) cLoss = cLoss.MustAdd(l, true) l.MustDrop() } @@ -174,7 +174,7 @@ func main() { if (stepIdx % 1000) == 0 { clone := inputVar.MustShallowClone() - img := clone.MustDetach() + img := clone.MustDetach(false) imageTs := img.MustTo(gotch.CPU, true) clone.MustDrop() err := in.SaveImage(imageTs, fmt.Sprintf("../../data/neural-style-transfer/out%v.jpg", stepIdx)) @@ -184,7 +184,7 @@ func main() { imageTs.MustDrop() } - fmt.Printf("Step %v ... Done. Loss %10.1f\n", stepIdx, loss.Values()[0]) + fmt.Printf("Step %v ... Done. Loss %10.1f\n", stepIdx, loss.Float64Values()[0]) cLoss.MustDrop() loss.MustDrop() } diff --git a/example/pretrained-models/main.go b/example/pretrained-model/main.go similarity index 100% rename from example/pretrained-models/main.go rename to example/pretrained-model/main.go diff --git a/example/tensor-grad/main.go b/example/tensor-grad/main.go index 7a5d56a..69fbfb9 100644 --- a/example/tensor-grad/main.go +++ b/example/tensor-grad/main.go @@ -9,7 +9,7 @@ import ( func main() { x := tensor.TensorFrom([]float64{2.0}) - x = x.MustSetRequiresGrad(true) + x = x.MustSetRequiresGrad(true, false) x.ZeroGrad() xy := tensor.TensorFrom([]float64{2.0}) @@ -19,10 +19,10 @@ func main() { z := x.MustMul(xz, false) y.Backward() - xgrad := x.MustGrad() + xgrad := x.MustGrad(false) xgrad.Print() // [2.0] z.Backward() - xgrad = x.MustGrad() + xgrad = x.MustGrad(false) xgrad.Print() // [5.0] due to accumulated 2.0 + 3.0 isGradEnabled := tensor.MustGradSetEnabled(false) diff --git a/example/transfer-learning/main.go b/example/transfer-learning/main.go index dd5a762..36c62c2 100644 --- a/example/transfer-learning/main.go +++ b/example/transfer-learning/main.go @@ -78,6 +78,6 @@ func main() { loss.MustDrop() testAccuracy := testImages.Apply(linear).AccuracyForLogits(dataset.TestLabels) - fmt.Printf("Epoch %v\t Accuracy: %5.2f%%\n", epoch, testAccuracy.Values()[0]*100) + fmt.Printf("Epoch %v\t Accuracy: %5.2f%%\n", epoch, testAccuracy.Float64Values()[0]*100) } } diff --git a/example/yolo/darknet.go b/example/yolo/darknet.go index 29b50b1..4830601 100644 --- a/example/yolo/darknet.go +++ b/example/yolo/darknet.go @@ -270,7 +270,7 @@ func upsample(prevChannels int64) (retVal1 int64, retVal2 interface{}) { h := res[2] w := res[3] - return xs.MustUpsampleNearest2d([]int64{h * 2, w * 2}, 2.0, 2.0) + return xs.MustUpsampleNearest2d([]int64{h * 2, w * 2}, 2.0, 2.0, false) }) return prevChannels, Layer{Val: layer} @@ -396,7 +396,7 @@ func detect(xs ts.Tensor, imageHeight int64, classes int64, anchors []Anchor) (r xOffset := a.MustView([]int64{-1, 1}, true) 
yOffset := b.MustView([]int64{-1, 1}, true) - xyOffsetTmp1 := ts.MustCat([]ts.Tensor{xOffset, yOffset}, 1, false) + xyOffsetTmp1 := ts.MustCat([]ts.Tensor{xOffset, yOffset}, 1) xyOffsetTmp2 := xyOffsetTmp1.MustRepeat([]int64{1, nanchors}, true) xyOffsetTmp3 := xyOffsetTmp2.MustView([]int64{-1, 2}, true) xyOffset := xyOffsetTmp3.MustUnsqueeze(0, true) @@ -512,7 +512,7 @@ func (dn *Darknet) BuildModel(vs nn.Path) (retVal nn.FuncT) { for _, i := range route.TsIdxs { layers = append(layers, prevYs[int(i)]) } - ysTs = ts.MustCat(layers, 1, false) + ysTs = ts.MustCat(layers, 1) case "Shortcut": from := b.Bl.(Shortcut).TsIdx @@ -540,7 +540,7 @@ func (dn *Darknet) BuildModel(vs nn.Path) (retVal nn.FuncT) { prevYs = append(prevYs, ysTs) } // end of For loop - res = ts.MustCat(detections, 1, true) + res = ts.MustCat(detections, 1) // Now, free-up memory held up by prevYs for _, t := range prevYs { diff --git a/example/yolo/main.go b/example/yolo/main.go index d74c777..2040cdc 100644 --- a/example/yolo/main.go +++ b/example/yolo/main.go @@ -87,7 +87,7 @@ func report(pred ts.Tensor, img ts.Tensor, w int64, h int64) (retVal ts.Tensor) // Extract the bounding boxes for which confidence is above the threshold. for index := 0; index < int(npreds); index++ { predIdx := pred.MustGet(index) - var predVals []float64 = predIdx.Values() + var predVals []float64 = predIdx.Float64Values() predIdx.MustDrop() confidence := predVals[4] @@ -229,7 +229,7 @@ func main() { netHeight := darknet.Height() netWidth := darknet.Width() - imgClone := originalImage.MustShallowClone().MustDetach() + imgClone := originalImage.MustShallowClone().MustDetach(false) imageTs, err := vision.Resize(imgClone, netWidth, netHeight) if err != nil { diff --git a/gen/gen.ml b/gen/gen.ml index fd4a99e..6f8f1f2 100644 --- a/gen/gen.ml +++ b/gen/gen.ml @@ -1,6 +1,6 @@ -(* Automatically generate the C++ -> C -> rust bindings. +(* Automatically generate the C++ -> C -> Go bindings. This takes as input the Descriptions.yaml file that gets generated when - (Func.c_go_args_list func) building PyTorch from source. +func (Func.c_go_args_list func) building PyTorch from source. 
Run with: dune exec gen/gen.exe *) @@ -42,11 +42,12 @@ let no_tensor_options = ; "randint_like" ; "randn_like" ] -let prefixed_functions = - Set.of_list - (module String) - ["add"; "add_"; "div"; "div_"; "mul"; "mul_"; "sub"; "sub_"; "nll_loss"] - +(* + * let prefixed_functions = + * Set.of_list + * (module String) + * ["add"; "add_"; "div"; "div_"; "mul"; "mul_"; "sub"; "sub_"; "nll_loss"] + * *) let excluded_prefixes = ["_thnn_"; "_th_"; "thnn_"; "th_"] let excluded_suffixes = ["_forward"; "_forward_out"] @@ -178,153 +179,291 @@ module Func = struct Printf.failwithf "Method calls should have at least one argument %s" t.name () ) - let replace_map = - Map.of_alist_exn - (module String) - [ ("t", "tr") - ; ("where", "where_") - ; ("view", "view_") - ; ("unsafe", "unsafe_") ] + (* + * let replace_map = + * Map.of_alist_exn + * (module String) + * [ ("t", "tr") + * ; ("where", "where_") + * ; ("view", "view_") + * ; ("unsafe", "unsafe_") ] + * *) + + let is_method t = + List.exists t.args ~f:(fun arg -> + match arg.arg_name with "self" -> true | _ -> false ) let go_name name = - let name = - Map.find replace_map name |> Option.value ~default:name - |> String.capitalize - |> String.substr_replace_all ~pattern:"__" ~with_:"" + let last_underscore name = Str.string_match (Str.regexp ".*_$") name 0 in + let words = Str.split (Str.regexp "_") name in + if last_underscore name then + let cap_words = List.map words ~f:(fun word -> String.capitalize word) in + String.concat ~sep:"" cap_words ^ "_" + else + let cap_words = List.map words ~f:(fun word -> String.capitalize word) in + String.concat ~sep:"" cap_words + + let go_variable name = + let goname = go_name name in + (* NOTE: Deal with Go namespace conflict *) + let safe_name = + match goname with + | "Var" -> "vari" + | "Unsafe" -> "unsafety" + | _ -> goname in - if String.is_prefix name ~prefix:"_" then - "Internal" ^ (name |> String.capitalize) - else name |> String.capitalize + String.uncapitalize safe_name let c_go_args_list t = List.map t.args ~f:(fun arg -> - let an = arg.arg_name in + let an = go_variable arg.arg_name in let single_param = Printf.sprintf "%s %s" an in match arg.arg_type with - | Bool -> single_param "C.int" - | Int64 -> single_param "C.long" - | Double -> single_param "C.double" + | Bool -> single_param "int32" + | Int64 -> single_param "int64" + | Double -> single_param "float64" | Tensor -> single_param "Ctensor" | TensorOption -> single_param "Ctensor" | Scalar -> single_param "Cscalar" - | ScalarType -> single_param "C.int" - | Device -> single_param "C.int" - | String -> Printf.sprintf "%s_ptr C.int, %s_len C.int" an an - | IntList -> Printf.sprintf "%s_data C.long, %s_len C.int" an an - | TensorList -> Printf.sprintf "%s_data Ctensor, %s_len C.int" an an - | TensorOptions -> - Printf.sprintf "%s_kind C.int, %s_device C.int" an an ) + | ScalarType -> single_param "int32" + | Device -> single_param "int32" + | String -> single_param "string" + | IntList -> Printf.sprintf "%sData []int64, %sLen int" an an + | TensorList -> Printf.sprintf "%sData []Ctensor, %sLen int" an an + | TensorOptions -> Printf.sprintf "%sKind int32, %sDevice int32" an an + ) |> String.concat ~sep:", " let c_go_args_list_notype t = List.map t.args ~f:(fun arg -> - let an = arg.arg_name in + let an = go_variable arg.arg_name in + let an = match an with "var" -> "vari" | _ -> an in let single_param = Printf.sprintf "%s %s" an in match arg.arg_type with - | Bool -> single_param "" - | Int64 -> single_param "" - | Double -> single_param "" - | 
Tensor -> single_param "" - | TensorOption -> single_param "" + | Bool -> Printf.sprintf "c%s" an + | Int64 -> Printf.sprintf "c%s" an + | Double -> Printf.sprintf "c%s" an + | Tensor -> Printf.sprintf "%s" an + | TensorOption -> Printf.sprintf "%s" an | Scalar -> single_param "" - | ScalarType -> single_param "" - | Device -> single_param "" - | String -> Printf.sprintf "%s_ptr, %s_len" an an - | IntList -> Printf.sprintf "%s_data, %s_len" an an - | TensorList -> Printf.sprintf "%s_data, %s_len" an an - | TensorOptions -> Printf.sprintf "%s_kind, %s_device" an an ) + | ScalarType -> Printf.sprintf "c%s" an + | Device -> Printf.sprintf "c%s" an + | String -> Printf.sprintf "c%s, c%sLen" an an + | IntList -> Printf.sprintf "c%sDataPtr, c%sLen" an an + | TensorList -> Printf.sprintf "c%sDataPtr, c%sLen" an an + | TensorOptions -> Printf.sprintf "c%sKind, c%sDevice" an an ) |> String.concat ~sep:", " - let self_name = "self" + (* TODO: convert Go pointer to C pointer *) + let c_go_args_list_body t = + List.map t.args ~f:(fun arg -> + let an = go_variable arg.arg_name in + (* let single_param = Printf.sprintf "%s %s" an in *) + match arg.arg_type with + | Bool -> + Printf.sprintf "\nc%s := *(*C.int)(unsafe.Pointer(&%s))" an an + | Int64 -> + Printf.sprintf "\nc%s := *(*C.int64_t)(unsafe.Pointer(&%s))" an an + | Double -> + Printf.sprintf "\nc%s := *(*C.double)(unsafe.Pointer(&%s))" an an + | Tensor -> "" + | TensorOption -> "" + | Scalar -> "" + | ScalarType -> + Printf.sprintf "\nc%s := *(*C.int)(unsafe.Pointer(&%s))" an an + | Device -> + Printf.sprintf "\nc%s := *(*C.int)(unsafe.Pointer(&%s))" an an + | String -> + Printf.sprintf + "\n\ + c%s := C.CString(%s)\n\ + %sLen := len(%s)\n\ + c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" + an an an an an an + | IntList -> + Printf.sprintf + "\n\ + c%sDataPtr := (*C.int64_t)(unsafe.Pointer(&%sData[0]))\n\ + c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" + an an an an + | TensorList -> + Printf.sprintf + "\n\ + c%sDataPtr := (*Ctensor)(unsafe.Pointer(&%sData[0]))\n\ + c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" + an an an an + | TensorOptions -> + Printf.sprintf + "\n\ + c%sKind := *(*C.int)(unsafe.Pointer(&%sKind))\n\ + c%sDevice := *(*C.int)(unsafe.Pointer(&%sDevice))" + an an an an ) + |> String.concat ~sep:"" - (* let input_name = "input" *) + let self_name = "self" let self_tensor arg = match arg.arg_type with | Tensor -> String.( = ) arg.arg_name self_name | _ -> false - let type_parameters t = - let needs_scalar_parameter = - List.exists t.args ~f:(fun arg -> - match arg.arg_type with Scalar -> true | _ -> false ) - in - let needs_type_parameter = - List.exists t.args ~f:(fun arg -> - match arg.arg_type with - | TensorList | TensorOption -> true - | _ -> false ) - in - if needs_type_parameter && needs_scalar_parameter then "Tensor, Scalar" - else if needs_type_parameter then "Tensor" - else if needs_scalar_parameter then "Scalar" - else "" + (* + * let type_parameters t = + * let needs_scalar_parameter = + * List.exists t.args ~f:(fun arg -> + * match arg.arg_type with Scalar -> true | _ -> false ) + * in + * let needs_type_parameter = + * List.exists t.args ~f:(fun arg -> + * match arg.arg_type with + * | TensorList | TensorOption -> true + * | _ -> false ) + * in + * if needs_type_parameter && needs_scalar_parameter then "Tensor, Scalar" + * else if needs_type_parameter then "Tensor" + * else if needs_scalar_parameter then "Scalar" + * else "" + * *) + + (* + * let go_args_list t = + * (* 
https://ocaml.janestreet.com/ocaml-core/latest/doc/base/Base/List/#val-partition_tf *) + * (* TODO. implement special cases - TensorOptions, ... *) + * match List.partition_tf t.args ~f:self_tensor with _, args_list -> + * args_list + * *) - let go_args_list t = - (* https://ocaml.janestreet.com/ocaml-core/latest/doc/base/Base/List/#val-partition_tf *) - match List.partition_tf t.args ~f:self_tensor with _, args_list -> - args_list + let is_inplace t = + match Str.string_match (Str.regexp ".*_$") t.name 0 with + | true -> true + | _ -> false let go_typed_args_list t = let to_string args = - List.map args ~f:(fun arg -> - let go_arg_type = + let args_list = + List.map args ~f:(fun arg -> + let go_arg_type = + match arg.arg_type with + | Bool -> "bool" + | Int64 -> "int64" + | Double -> "float64" + | Tensor -> "Tensor" + | TensorOption -> "Tensor" + | IntList -> "[]int64" + | TensorList -> "[]Tensor" + | String -> "string" + (* TODO. Struct{Kind gotch.DType Device gotch.Device} *) + (* E.g. `type KindDevice struct{}` *) + | TensorOptions -> "gotch.KindDevice" + | Scalar -> "Scalar" + | ScalarType -> "gotch.DType" + | Device -> "gotch.Device" + in match arg.arg_type with - | Bool -> "bool" - | Int64 -> "int64" - | Double -> "float64" - | Tensor -> "Tensor" - | TensorOption -> "TensorOption" - | IntList -> "[]int64" - | TensorList -> "[]Tensor" - | String -> "string" - | TensorOptions -> "(Kind, Device)" - | Scalar -> "Scalar" - | ScalarType -> "Kind" - | Device -> "Device" - in - Printf.sprintf "%s %s" (go_name arg.arg_name) go_arg_type ) - |> String.concat ~sep:", " + | TensorOptions -> + Printf.sprintf "%sKind gotch.DType, %sDevice gotch.Device" + (go_variable arg.arg_name) (go_variable arg.arg_name) + | _ -> + Printf.sprintf "%s %s" (go_variable arg.arg_name) go_arg_type + ) + in + if is_method t && not (is_inplace t) then + args_list @ ["del bool"] |> String.concat ~sep:", " + else args_list |> String.concat ~sep:", " in - let self_arg = - "self Tensor" - (* if String.is_suffix t.name ~suffix:"_" then "self" else "&self" *) + (* let self_arg = "self Tensor" in *) + match List.partition_tf t.args ~f:self_tensor with _, args_list -> + Printf.sprintf "%s" (to_string args_list) + + let go_notype_args_list t = + let to_string args = + let args_list = + List.map args ~f:(fun arg -> + match arg.arg_type with + | TensorOptions -> + Printf.sprintf "%sKind, %sDevice" (go_variable arg.arg_name) + (go_variable arg.arg_name) + | _ -> Printf.sprintf "%s" (go_variable arg.arg_name) ) + in + if is_method t && not (is_inplace t) then + args_list @ ["del"] |> String.concat ~sep:", " + else args_list |> String.concat ~sep:", " in match List.partition_tf t.args ~f:self_tensor with _, args_list -> - Printf.sprintf "%s, %s" self_arg (to_string args_list) + Printf.sprintf "%s" (to_string args_list) let go_return_type t ~fallible = + (* printf "t name: %s\n" t.name ; *) let returns = match t.returns with - | `fixed 1 -> "Tensor" + | `fixed 1 -> "retVal Tensor" | `fixed v -> - List.init v ~f:(fun _ -> "Tensor") - |> String.concat ~sep:", " |> Printf.sprintf "(%s)" - | `dynamic -> "[]Tensor" + List.init v ~f:(fun i -> Printf.sprintf "retVal%d Tensor" i) + |> String.concat ~sep:", " |> Printf.sprintf "%s" + | `dynamic -> "retVal []Tensor" in - if fallible then Printf.sprintf "(error, %s)" returns - else Printf.sprintf " %s" returns + if is_inplace t then + if fallible then Printf.sprintf "err error" else Printf.sprintf "" + else if fallible then Printf.sprintf "%s, err error" returns + else Printf.sprintf "%s" 
returns + + let go_return_notype t ~fallible = + let returns = + match t.returns with + | `fixed 1 -> "retVal" + | `fixed v -> + List.init v ~f:(fun i -> Printf.sprintf "retVal%d" i) + |> String.concat ~sep:", " |> Printf.sprintf "%s" + | `dynamic -> "retVal" + in + if is_inplace t then + if fallible then Printf.sprintf "err" else Printf.sprintf "" + else if fallible then Printf.sprintf "%s, err" returns + else Printf.sprintf "%s" returns let go_binding_args t = List.map t.args ~f:(fun arg -> - let name = go_name arg.arg_name in + let name = go_variable arg.arg_name in match arg.arg_type with - | Tensor -> Printf.sprintf "%s.c_tensor" name - | Scalar -> Printf.sprintf "%s.c_scalar" name - | Bool -> Printf.sprintf "if %s { 1 } else { 0 }" name - | ScalarType -> Printf.sprintf "%s.c_int()" name - | Device -> Printf.sprintf "%s.c_int()" name + | Tensor -> + if String.( = ) name "self" then "ts.ctensor" + else Printf.sprintf "%s.ctensor" name + | Scalar -> Printf.sprintf "%s.cscalar" name + | Bool -> Printf.sprintf "c%s" name + | ScalarType -> Printf.sprintf "%s.CInt()" name + | Device -> Printf.sprintf "%s.CInt()" name | TensorOptions -> - Printf.sprintf "%s.0.c_int(), %s.1.c_int()" name name - | String -> Printf.sprintf "%s.as_ptr(), %s.len() int32" name name - | IntList -> Printf.sprintf "%s.as_ptr(), %s.len() int32" name name - | TensorList -> - Printf.sprintf "ptr_list(%s).as_ptr(), %s.len() int32" name name - | TensorOption -> Printf.sprintf "%s.c_tensor)" name - | Int64 when String.( = ) name "reduction" -> "reduction.to_int()" + Printf.sprintf "%sKind.CInt(), %sDevice.CInt()" name name + | String -> Printf.sprintf "%s" name + | IntList -> Printf.sprintf "%s, len(%s)" name name + | TensorList -> Printf.sprintf "c%s, len(c%s)" name name + | TensorOption -> Printf.sprintf "%s.ctensor" name | _ -> name ) - (* |> String.concat ~sep:",\n " *) |> String.concat ~sep:", " + + let go_binding_body t = + List.map t.args ~f:(fun arg -> + let an = go_variable arg.arg_name in + match arg.arg_type with + | Bool -> + Printf.sprintf "c%s := int32(0)\n if %s { c%s = int32(1) }\n" an an + an + | Int64 -> "" + | Double -> "" + | Tensor -> "" + | TensorOption -> "" + | Scalar -> "" + | ScalarType -> "" + | Device -> "" + | String -> "" + | IntList -> "" + | TensorList -> + Printf.sprintf + " var c%s []lib.Ctensor\n\ + \ for _, t := range %s {c%s = append(c%s, t.ctensor)}\n" + an an an an + | TensorOptions -> "" ) + |> String.concat ~sep:"" end exception Not_a_simple_arg @@ -494,110 +633,280 @@ let write_cpp funcs filename = ph "tensor *atg_%s(%s);" exported_name c_typed_args_list ) ) ) -let write_fallible_wrapper funcs filename = - Out_channel.with_file filename ~f:(fun out_ml -> - let pm s = print_inline out_ml s in - pm "/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! 
*/" ; - pm "\n" ; - pm "package libtch" ; - pm "\n\n" ; - pm "func ptr_list(l []Tensor) []*C_tensor {\n" ; - pm " var retVal []*C_tensor \n" ; - pm " for _, x := range l{ \n" ; - pm " retVal = append(retVal, x) \n" ; - pm " } \n" ; - pm "} \n" ; - pm "\n" ; - (* Implement Tensor *) - Map.iteri funcs ~f:(fun ~key:exported_name ~data:(func : Func.t) -> - let go_name = Func.go_name exported_name in - let go_args_list = Func.go_typed_args_list func in - pm "\n" ; - pm "func f_%s%s(" go_name (Func.type_parameters func) ; - pm "%s" go_args_list ; - pm ")%s { \n" (Func.go_return_type func ~fallible:true) ; - match func.returns with - | `dynamic -> - pm " c_tensors := unsafe_torch_err!({" ; - pm "atg_%s(" exported_name ; - pm "%s)}) \n" (Func.go_binding_args func) ; - pm " var r__ []Tensor \n" ; - pm " i := 0 \n" ; - pm " for { \n" ; - pm " c__ := unsafe{*c_tensors.add(i)} \n" ; - pm " if c__.is_null() { break } \n" ; - pm " r__ = append(r__, Tensor {C_tensor: c__}) \n" ; - pm " i += 1 \n" ; - pm " } \n" ; - (* pm " // unsafe{libc::free(c_tensors as *mut libc::c_void)}" ; *) - pm " return r__ \n" ; - pm "} \n" - | `fixed ntensors -> - pm " var c_tensors []C_tensor = make([]C_tensor, %d) \n" - ntensors ; - pm " unsafe_torch_err({ \n" ; - pm " atg_%s(c_tensors, " exported_name ; - pm "%s) \n" (Func.go_binding_args func) ; - pm " }) \n" ; - let returns = - if ntensors = 1 then "Tensor { C_tensor: c_tensors[0] }" - else - List.init ntensors - ~f:(Printf.sprintf "Tensor { C_tensor: c_tensors[%d] }") - |> String.concat ~sep:", " |> Printf.sprintf "(%s)" - in - pm " return %s \n" returns ; - pm "} \n" ) ) - let write_wrapper funcs filename = Out_channel.with_file filename ~f:(fun out_ml -> let pm s = print_inline out_ml s in - pm "/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! */" ; + pm "package tensor" ; pm "\n\n" ; - pm "package libtch" ; + pm "// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" ; pm "\n\n" ; - Map.iteri funcs ~f:(fun ~key:exported_name ~data:(func : Func.t) -> - let go_name = Func.go_name exported_name in - let go_name, fallible_go_name = - if Set.mem prefixed_functions func.name then - ("g_" ^ go_name, "f_" ^ go_name) - else (go_name, "f_" ^ go_name) + pm "// #include \"stdlib.h\"\n" ; + pm "import \"C\"" ; + pm "" ; + pm "\n\n" ; + pm "import(\n" ; + pm " \"unsafe\"\n" ; + pm "\n" ; + pm " \"github.com/sugarme/gotch\"\n" ; + pm " lib \"github.com/sugarme/gotch/libtch\"\n" ; + pm ")" ; + pm "\n\n" ; + Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + let is_method = Func.is_method func in + let is_inplace = Func.is_inplace func in + (* NOTE. `torch.__PATTERN` *) + let prefix_2underscore exported_name = + Str.string_match (Str.regexp "^__") exported_name 0 in - pm "\n" ; - pm "func %s%s(" go_name (Func.type_parameters func) ; + (* NOTE. `torch._PATTERN` *) + let prefix_1underscore exported_name = + Str.string_match (Str.regexp "^_") exported_name 0 + in + (* NOTE. 
`torch.PATTERN_1` *) + let suffix_1 exported_name = + Str.string_match (Str.regexp ".*_1$") exported_name 0 + in + let gofunc_name = + if prefix_2underscore exported_name then + "__" ^ Func.go_name exported_name + else if prefix_1underscore exported_name then + "_" ^ Func.go_name exported_name + else if suffix_1 exported_name then + Func.go_name exported_name ^ "_" + else Func.go_name exported_name + in + let cfunc_name = "lib.Atg" ^ gofunc_name in let go_args_list = Func.go_typed_args_list func in - pm "%s" go_args_list ; - pm ")%s {\n" (Func.go_return_type func ~fallible:false) ; - let go_args_list = Func.go_args_list func in - let go_args_list = - List.map go_args_list ~f:(fun arg -> Func.go_name arg.Func.arg_name) - |> String.concat ~sep:", " + (* NOTE. temporarily excluding these functions as not implemented at FFI *) + (* TODO. implement multiple tensors return function []Tensor *) + let excluded_funcs = + [ "Chunk" + ; "AlignTensors" + ; "BroadcastTensors" + ; "Meshgrid" + ; "NonzeroNumpy" + ; "Split" + ; "SplitWithSizes" + ; "Unbind" + ; "Where" ] in - pm " %s(%s)\n" fallible_go_name go_args_list ; - pm "}\n" ) ; + if + List.exists excluded_funcs ~f:(fun name -> + String.( = ) name gofunc_name ) + then pm "" + else + match func.returns with + | `dynamic -> + pm "\n" ; + if is_method then pm "func(ts Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm "if del { defer ts.MustDrop() }\n" ; + pm " ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))\n" ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm "%s(ptr, %s)\n" cfunc_name (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + (* NOTE. if in_place method, no retVal return *) + if not (Func.is_inplace func) then + pm " retVal = Tensor{ctensor: *ptr}\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" + | `fixed 1 -> + pm "\n" ; + if is_method then pm "func(ts Tensor) %s(" gofunc_name + else pm "func %s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:true) ; + if is_method && not is_inplace then + pm "if del { defer ts.MustDrop() }\n" ; + pm " ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0)))\n" ; + pm " \n" ; + pm " %s" (Func.go_binding_body func) ; + pm "%s(ptr, %s)\n" cfunc_name (Func.go_binding_args func) ; + pm " if err = TorchErr(); err != nil {\n" ; + pm " return %s\n" + (Func.go_return_notype func ~fallible:true) ; + pm " }\n" ; + (* NOTE. if in_place method, no retVal return *) + if not (Func.is_inplace func) then + pm " retVal = Tensor{ctensor: *ptr}\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:true) ; + pm "} \n" + | `fixed _ -> pm "" ) ; + (* TODO. implement for return multiple tensor - []Tensor *) + pm "// End of implementing Tensor ================================= \n" + ) + +let write_must_wrapper funcs filename = + Out_channel.with_file filename ~f:(fun out_ml -> + let pm s = print_inline out_ml s in + pm "package tensor" ; + pm "\n\n" ; + pm "// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" 
; + pm "\n\n" ; + pm "import(\n" ; + pm " \"log\"\n" ; + pm "\n" ; + pm " \"github.com/sugarme/gotch\"\n" ; + pm ")" ; + pm "\n\n" ; + Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + let is_method = Func.is_method func in + (* NOTE. `torch.__PATTERN` *) + let prefix_2underscore exported_name = + Str.string_match (Str.regexp "^__") exported_name 0 + in + (* NOTE. `torch._PATTERN` *) + let prefix_1underscore exported_name = + Str.string_match (Str.regexp "^_") exported_name 0 + in + (* NOTE. `torch.PATTERN_1` *) + let suffix_1 exported_name = + Str.string_match (Str.regexp ".*_1$") exported_name 0 + in + let gofunc_name = + if prefix_2underscore exported_name then + "__" ^ Func.go_name exported_name + else if prefix_1underscore exported_name then + "_" ^ Func.go_name exported_name + else if suffix_1 exported_name then + Func.go_name exported_name ^ "_" + else Func.go_name exported_name + in + let go_args_list = Func.go_typed_args_list func in + let go_args_list_notype = Func.go_notype_args_list func in + (* NOTE. temporarily excluding these functions as not implemented at FFI *) + (* TODO. implement multiple tensors return function []Tensor *) + let excluded_funcs = + [ "Chunk" + ; "AlignTensors" + ; "BroadcastTensors" + ; "Meshgrid" + ; "NonzeroNumpy" + ; "Split" + ; "SplitWithSizes" + ; "Unbind" + ; "Where" ] + in + if + List.exists excluded_funcs ~f:(fun name -> + String.( = ) name gofunc_name ) + then pm "" + else + match func.returns with + | `dynamic -> + pm "\n" ; + if is_method then pm "func(ts Tensor) %s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + if is_method then + pm " retVal, err := ts.%s(%s)\n" gofunc_name + go_args_list_notype + else + pm " retVal, err := %s(%s)\n" gofunc_name + go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" + | `fixed 1 -> + pm "\n" ; + if is_method then pm "func(ts Tensor) Must%s(" gofunc_name + else pm "func Must%s(" gofunc_name ; + pm "%s" go_args_list ; + pm ")(%s) { \n" (Func.go_return_type func ~fallible:false) ; + pm " \n" ; + (* NOTE. No return retVal for in_place method *) + if Func.is_inplace func then + if is_method then + pm " err := ts.%s(%s)\n" gofunc_name go_args_list_notype + else pm " err := %s(%s)\n" gofunc_name go_args_list_notype + else if is_method then + pm " retVal, err := ts.%s(%s)\n" gofunc_name + go_args_list_notype + else + pm " retVal, err := %s(%s)\n" gofunc_name + go_args_list_notype ; + pm " if err != nil { log.Fatal(err) }\n" ; + pm " \n" ; + pm " return %s\n" (Func.go_return_notype func ~fallible:false) ; + pm "} \n" + | `fixed _ -> pm "" ) ; + (* TODO. implement for return multiple tensor - []Tensor *) pm "// End of implementing Tensor ================================= \n" ) let write_ffi funcs filename = Out_channel.with_file filename ~f:(fun out_ml -> let pm s = p out_ml s in - pm "/* THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! */" ; pm "package libtch" ; pm "" ; - pm "// #include \"stdbool.h\" " ; - pm "// #include \"torch_api.h\" " ; + pm "// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!" 
; + pm "" ; + pm "//#include \"stdbool.h\" " ; + pm "//#include \"torch_api.h\" " ; pm "import \"C\"" ; pm "" ; + pm "import \"unsafe\"" ; + pm "" ; Map.iteri funcs ~f:(fun ~key:exported_name ~data:func -> + (* let is_method = *) + (* match func.Func.kind with `method_ -> true | `function_ -> false *) + (* in *) + (* let is_inplace = *) + (* Func.is_inplace func *) + (* + * match exported_name with + * | "add_1" -> true + * | "sub_1" -> true + * | "div_1" -> true + * | "mul_1" -> true + * | _ -> false + * *) + (* in *) + (* NOTE. `torch.__PATTERN` *) + let prefix_2underscore exported_name = + Str.string_match (Str.regexp "^__") exported_name 0 + in + (* NOTE. `torch._PATTERN` *) + let prefix_1underscore exported_name = + Str.string_match (Str.regexp "^_") exported_name 0 + in + (* NOTE. `torch.PATTERN_1` *) + let suffix_1 exported_name = + Str.string_match (Str.regexp ".*_1$") exported_name 0 + in + let ffifunc_name = + if prefix_2underscore exported_name then + "__" ^ Func.go_name exported_name + else if prefix_1underscore exported_name then + "_" ^ Func.go_name exported_name + else if suffix_1 exported_name then + Func.go_name exported_name ^ "_" + else Func.go_name exported_name + in match func.Func.returns with | `fixed _ -> - pm "func Atg_%s(ptr *Ctensor, %s){C.atg_%s(ptr, %s)}" - (Func.go_name exported_name) - (Func.c_go_args_list func) exported_name + pm "func Atg%s(ptr *Ctensor, %s){%s \nC.atg_%s(ptr, %s)\n}" + ffifunc_name (Func.c_go_args_list func) + (Func.c_go_args_list_body func) + exported_name (Func.c_go_args_list_notype func) - | `dynamic -> - pm "func Atg_%s(%s)(*Ctensor)" exported_name - (Func.c_go_args_list func) ) ) + | `dynamic -> pm "" + (* TODO: need more implement here *) + (* pm "func Atg%s(%s)(retValPtr *Ctensor)" *) + (* (Func.go_name exported_name) *) + (* (Func.c_go_args_list func) *) ) ) let methods = let c name args = {Func.name; args; returns= `fixed 1; kind= `method_} in @@ -607,8 +916,8 @@ let methods = ; c "toType" [ca "self" Tensor; ca "scalar_type" ScalarType] ; c "to" [ca "self" Tensor; ca "device" Device] ] -let run ~yaml_filename ~cpp_filename ~ffi_filename ~wrapper_filename - ~fallible_wrapper_filename = +let run ~yaml_filename ~cpp_filename ~ffi_filename ~must_wrapper_filename + ~wrapper_filename = let funcs = read_yaml yaml_filename in let funcs = methods @ funcs in printf "Generating code for %d functions.\n%!" 
(List.length funcs) ; @@ -631,11 +940,12 @@ let run ~yaml_filename ~cpp_filename ~ffi_filename ~wrapper_filename in write_cpp funcs cpp_filename ; write_ffi funcs ffi_filename ; - write_wrapper funcs wrapper_filename ; - write_fallible_wrapper funcs fallible_wrapper_filename + write_must_wrapper funcs must_wrapper_filename ; + write_wrapper funcs wrapper_filename let () = - run ~yaml_filename:"third_party/pytorch/Declarations-v1.5.0.yaml" - ~cpp_filename:"tmp/torch_api_generated" ~ffi_filename:"tmp/c_generated.go" - ~wrapper_filename:"tmp/tensor_generated.go" - ~fallible_wrapper_filename:"tmp/tensor_fallible_generated.go" + run ~yaml_filename:"gen/pytorch/Declarations-v1.5.0.yaml" + ~cpp_filename:"libtch/torch_api_generated" + ~ffi_filename:"libtch/c-generated.go" + ~must_wrapper_filename:"tensor/must-tensor-generated.go" + ~wrapper_filename:"tensor/tensor-generated.go" diff --git a/gen/third_party/pytorch/Declarations-v1.4.0.yaml b/gen/pytorch/Declarations-v1.4.0.yaml similarity index 100% rename from gen/third_party/pytorch/Declarations-v1.4.0.yaml rename to gen/pytorch/Declarations-v1.4.0.yaml diff --git a/gen/third_party/pytorch/Declarations-v1.5.0.yaml b/gen/pytorch/Declarations-v1.5.0.yaml similarity index 100% rename from gen/third_party/pytorch/Declarations-v1.5.0.yaml rename to gen/pytorch/Declarations-v1.5.0.yaml diff --git a/gen/third_party/pytorch/LICENSE b/gen/pytorch/LICENSE similarity index 100% rename from gen/third_party/pytorch/LICENSE rename to gen/pytorch/LICENSE diff --git a/gen/third_party/pytorch/README b/gen/pytorch/README similarity index 100% rename from gen/third_party/pytorch/README rename to gen/pytorch/README diff --git a/libtch/c-generated-sample.go b/libtch/c-generated-sample.go deleted file mode 100644 index ad8298d..0000000 --- a/libtch/c-generated-sample.go +++ /dev/null @@ -1,732 +0,0 @@ -// NOTE: this is a sample for OCaml generated code for `c-generated.go` -package libtch - -//#include "stdbool.h" -//#include "torch_api.h" -import "C" - -import ( - "unsafe" -) - -// void atg_eq1(tensor *, tensor self, tensor other); -func AtgEq1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_eq1(ptr, self, other) -} - -// void atg_matmul(tensor *, tensor self, tensor other); -func AtgMatmul(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_matmul(ptr, self, other) -} - -// void atg_to(tensor *, tensor self, int device); -func AtgTo(ptr *Ctensor, self Ctensor, device int) { - cdevice := *(*C.int)(unsafe.Pointer(&device)) - C.atg_to(ptr, self, cdevice) -} - -// void atg_grad(tensor *, tensor self); -func AtgGrad(ptr *Ctensor, self Ctensor) { - C.atg_grad(ptr, self) -} - -// void atg_detach_(tensor *, tensor self); -func AtgDetach_(ptr *Ctensor, self Ctensor) { - C.atg_detach_(ptr, self) -} - -// void atg_detach(tensor *, tensor self); -func AtgDetach(ptr *Ctensor, self Ctensor) { - C.atg_detach(ptr, self) -} - -// void atg_zero_(tensor *, tensor self); -func AtgZero_(ptr *Ctensor, self Ctensor) { - C.atg_zero_(ptr, self) -} - -// void atg_set_requires_grad(tensor *, tensor self, int r); -func AtgSetRequiresGrad(ptr *Ctensor, self Ctensor, r int) { - cr := *(*C.int)(unsafe.Pointer(&r)) - C.atg_set_requires_grad(ptr, self, cr) -} - -// void atg_mul(tensor *, tensor self, tensor other); -func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_mul(ptr, self, other) -} - -// void atg_mul_(tensor *, tensor self, tensor other); -func AtgMul_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_mul_(ptr, self, other) -} - -// void atg_mul1(tensor *, tensor 
self, scalar other); -func AtgMul1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_mul1(ptr, self, other) -} - -// void atg_mul_1(tensor *, tensor self, scalar other); -func AtgMul1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_mul_1(ptr, self, other) -} - -// void atg_add(tensor *, tensor self, tensor other); -func AtgAdd(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_add(ptr, self, other) -} - -// void atg_add_(tensor *, tensor self, tensor other); -func AtgAdd_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_add_(ptr, self, other) -} - -// id atg_add1(tensor *, tensor self, scalar other); -func AtgAdd1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_add1(ptr, self, other) -} - -// void atg_add_1(tensor *, tensor self, scalar other); -func AtgAdd1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_add_1(ptr, self, other) -} - -// void atg_totype(tensor *, tensor self, int scalar_type); -func AtgTotype(ptr *Ctensor, self Ctensor, scalar_type int32) { - cscalar_type := *(*C.int)(unsafe.Pointer(&scalar_type)) - C.atg_totype(ptr, self, cscalar_type) -} - -// void atg_unsqueeze(tensor *, tensor self, int64_t dim); -func AtgUnsqueeze(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_unsqueeze(ptr, self, cdim) -} - -// void atg_select(tensor *, tensor self, int64_t dim, int64_t index); -func AtgSelect(ptr *Ctensor, self Ctensor, dim int64, index int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cindex := *(*C.int64_t)(unsafe.Pointer(&index)) - C.atg_select(ptr, self, cdim, cindex) -} - -// void atg_narrow(tensor *, tensor self, int64_t dim, int64_t start, int64_t length); -func AtgNarrow(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cstart := *(*C.int64_t)(unsafe.Pointer(&start)) - clength := *(*C.int64_t)(unsafe.Pointer(&length)) - C.atg_narrow(ptr, self, cdim, cstart, clength) -} - -// void atg_index_select(tensor *, tensor self, int64_t dim, tensor index); -func AtgIndexSelect(ptr *Ctensor, self Ctensor, dim int64, index Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_select(ptr, self, cdim, index) -} - -// void atg_zeros(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); -func AtgZeros(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind, optionsDevice int32) { - // just get pointer of the first element of the shape(sizeData) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - - C.atg_zeros(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} - -// void atg_ones(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); -func AtgOnes(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind, optionsDevice int32) { - // just get pointer of the first element of the shape(sizeData) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - - C.atg_ones(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} - -// void atg_uniform_(tensor *, tensor self, double from, double to); -func AtgUniform_(ptr *Ctensor, self Ctensor, from float64, to float64) { - cfrom := *(*C.double)(unsafe.Pointer(&from)) - 
cto := *(*C.double)(unsafe.Pointer(&to)) - - C.atg_uniform_(ptr, self, cfrom, cto) -} - -// void atg_zeros_like(tensor *, tensor self); -func AtgZerosLike(ptr *Ctensor, self Ctensor) { - C.atg_zeros_like(ptr, self) -} - -// void atg_fill_(tensor *, tensor self, scalar value); -func AtgFill_(ptr *Ctensor, self Ctensor, value Cscalar) { - C.atg_fill_(ptr, self, value) -} - -// void atg_randn_like(tensor *, tensor self); -func AtgRandnLike(ptr *Ctensor, self Ctensor) { - C.atg_rand_like(ptr, self) -} - -// void atg_log_softmax(tensor *, tensor self, int64_t dim, int dtype); -func AtgLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - - C.atg_log_softmax(ptr, self, cdim, cdtype) -} - -// void atg_nll_loss(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); -func AtgNllLoss(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - - C.atg_nll_loss(ptr, self, target, weight, creduction, cignoreIndex) -} - -// void atg_argmax(tensor *, tensor self, int64_t dim, int keepdim); -func AtgArgmax(ptr *Ctensor, self Ctensor, dim int64, keepDim int) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepDim := *(*C.int)(unsafe.Pointer(&keepDim)) - - C.atg_argmax(ptr, self, cdim, ckeepDim) -} - -// void atg_mean(tensor *, tensor self, int dtype); -func AtgMean(ptr *Ctensor, self Ctensor, dtype int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - - C.atg_mean(ptr, self, cdtype) -} - -// void atg_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); -func AtgMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepDim int, dtype int32) { - - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepDim := *(*C.int)(unsafe.Pointer(&keepDim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - - C.atg_mean1(ptr, self, cdimDataPtr, cdimLen, ckeepDim, cdtype) -} - -// void atg_permute(tensor *, tensor self, int64_t *dims_data, int dims_len); -func AtgPermute(ptr *Ctensor, self Ctensor, dims []int64, dimLen int) { - // just get pointer of the first element of the shape - cdimsPtr := (*C.int64_t)(unsafe.Pointer(&dims[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - - C.atg_permute(ptr, self, cdimsPtr, cdimLen) -} - -// void atg_squeeze1(tensor *, tensor self, int64_t dim); -func AtgSqueeze1(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - - C.atg_squeeze1(ptr, self, cdim) -} - -// void atg_squeeze_(tensor *, tensor self); -func AtgSqueeze_(ptr *Ctensor, self Ctensor) { - C.atg_squeeze_(ptr, self) -} - -// void atg_stack(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); -func AtgStack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { - tensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - - C.atg_stack(ptr, tensorsDataPtr, ctensorsLen, cdim) -} - -// void atg_mm(tensor *, tensor self, tensor mat2); -func AtgMm(ptr *Ctensor, self Ctensor, mat2 Ctensor) { - C.atg_mm(ptr, self, mat2) -} - -// void atg_view(tensor *, tensor self, int64_t *size_data, int size_len); -func AtgView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen 
int) { - sizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - - C.atg_view(ptr, self, sizeDataPtr, csizeLen) -} - -// void atg_div1(tensor *, tensor self, scalar other); -func AtgDiv1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_div1(ptr, self, other) -} - -// void atg_div(tensor *, tensor self, tensor other); -func AtgDiv(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_div(ptr, self, other) -} - -// void atg_div_(tensor *, tensor self, tensor other); -func AtgDiv_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_div_(ptr, self, other) -} - -// void atg_div_1(tensor *, tensor self, scalar other); -func AtgDiv1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_div_1(ptr, self, other) -} - -// void atg_randperm(tensor *, int64_t n, int options_kind, int options_device); -func AtgRandperm(ptr *Ctensor, n int64, optionKind int32, optionDevice int32) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - coptionKind := *(*C.int)(unsafe.Pointer(&optionKind)) - coptionDevice := *(*C.int)(unsafe.Pointer(&optionDevice)) - - C.atg_randperm(ptr, cn, coptionKind, coptionDevice) -} - -// void atg_clamp_(tensor *, tensor self, scalar min, scalar max); -func AtgClamp_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar) { - C.atg_clamp_(ptr, self, min, max) -} - -// void atg_clamp(tensor *, tensor self, scalar min, scalar max); -func AtgClamp(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar) { - C.atg_clamp(ptr, self, min, max) -} - -// void atg_clamp_max(tensor *, tensor self, scalar max); -func AtgClampMax(ptr *Ctensor, self Ctensor, max Cscalar) { - C.atg_clamp_max(ptr, self, max) -} - -// void atg_relu(tensor *, tensor self); -func AtgRelu(ptr *Ctensor, self Ctensor) { - C.atg_relu(ptr, self) -} - -// void atg_relu_(tensor *, tensor self); -func AtgRelu_(ptr *Ctensor, self Ctensor) { - C.atg_relu_(ptr, self) -} - -// void atg_t(tensor *, tensor self); -func AtgT(ptr *Ctensor, self Ctensor) { - C.atg_t(ptr, self) -} - -// void atg_t_(tensor *, tensor self); -func AtgT_(ptr *Ctensor, self Ctensor) { - C.atg_t_(ptr, self) -} - -// void atg_mse_loss(tensor *, tensor self, tensor target, int64_t reduction); -func AtgMseLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - - C.atg_mse_loss(ptr, self, target, creduction) -} - -// void atg_exp(tensor *, tensor self); -func AtgExp(ptr *Ctensor, self Ctensor) { - C.atg_exp(ptr, self) -} - -// void atg_exp_(tensor *, tensor self); -func AtgExp_(ptr *Ctensor, self Ctensor) { - C.atg_exp_(ptr, self) -} - -// void atg_pow(tensor *, tensor self, scalar exponent); -func AtgPow(ptr *Ctensor, self Ctensor, exponent Cscalar) { - C.atg_pow(ptr, self, exponent) -} - -// void atg_sum(tensor *, tensor self, int dtype); -func AtgSum(ptr *Ctensor, self Ctensor, dtype int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - - C.atg_sum(ptr, self, cdtype) -} - -// void atg_sub(tensor *, tensor self, tensor other); -func AtgSub(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_sub(ptr, self, other) -} - -// void atg_sub1(tensor *, tensor self, scalar other); -func AtgSub1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_sub1(ptr, self, other) -} - -// void atg_sub_(tensor *, tensor self, tensor other); -func AtgSub_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_sub_(ptr, self, other) -} - -// void atg_sub_1(tensor *, tensor self, scalar other); -func AtgSub1_(ptr *Ctensor, self Ctensor, other Cscalar) { - 
C.atg_sub_1(ptr, self, other) -} - -// void atg_conv1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -func AtgConv1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - - C.atg_conv1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} - -// void atg_conv2d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -func AtgConv2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - - C.atg_conv2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} - -// void atg_conv3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -func AtgConv3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - - C.atg_conv3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} - -// void atg_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -func AtgMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int) { - - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := 
*(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - - C.atg_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} - -// void atg_avg_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -func AtgAvgPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int, countIncludePad int, divisorOverride int64) { - - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - - C.atg_avg_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} - -// void atg_dropout(tensor *, tensor input, double p, int train); -func AtgDropout(ptr *Ctensor, input Ctensor, p float64, train int) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - - C.atg_dropout(ptr, input, cp, ctrain) -} - -// void atg_dropout_(tensor *, tensor self, double p, int train); -func AtgDropout_(ptr *Ctensor, self Ctensor, p float64, train int) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - - C.atg_dropout_(ptr, self, cp, ctrain) -} - -// void atg_conv_transpose1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); -func AtgConvTranspose1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - - C.atg_conv_transpose1d(ptr, input, weight, bias, cstrideDataPtr, 
cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} - -// void atg_conv_transpose2d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); -func AtgConvTranspose2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - - C.atg_conv_transpose2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} - -// void atg_conv_transpose3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); -func AtgConvTranspose3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - - C.atg_conv_transpose3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} - -// void atg_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); -func AtgLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int, numLayers int64, dropout float64, train int, bidirectional int, batchFirst int) { - - chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) - chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain :=
*(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - - C.atg_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} - -// void atg_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); -func AtgGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int, numLayers int64, dropout float64, train int, bidirectional int, batchFirst int) { - - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - - C.atg_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} - -// void atg_randn(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); -func AtgRandn(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - - C.atg_randn(ptr, csizeDataPtr, csizeLen, coptionKind, coptionDevice) -} - -// void atg_embedding(tensor *, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse); -func AtgEmbedding(ptr *Ctensor, weight Ctensor, indices Ctensor, paddingIdx int64, scaleGradByFreq int, sparse int) { - - cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - csparse := *(*C.int)(unsafe.Pointer(&sparse)) - - C.atg_embedding(ptr, weight, indices, cpaddingIdx, cscaleGradByFreq, csparse) -} - -// void atg_randint(tensor *, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device); -func AtgRandint(ptr *Ctensor, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - - chigh := *(*C.int64_t)(unsafe.Pointer(&high)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - - C.atg_randint(ptr, chigh, csizeDataPtr, csizeLen, coptionKind, coptionDevice) -} - -// void atg_layer_norm(tensor *, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable); -func AtgLayerNorm(ptr *Ctensor, input Ctensor, normalizedShapeData []int64, normalizedShapeLen int, weight Ctensor, bias Ctensor, eps float64, cudnnEnable int) { - - cnormalizedShapeDataPtr := (*C.int64_t)(unsafe.Pointer(&normalizedShapeData[0])) - cnormalizedShapeLen := *(*C.int)(unsafe.Pointer(&normalizedShapeLen)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable)) - - C.atg_layer_norm(ptr, input, cnormalizedShapeDataPtr, cnormalizedShapeLen, weight, bias,
ceps, ccudnnEnable) -} - -// void atg_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled); -func AtgBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int, momentum float64, eps float64, cudnnEnable int) { - - ctraining := *(*C.int)(unsafe.Pointer(&training)) - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable)) - - C.atg_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps, ccudnnEnable) -} - -// void atg_cat(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); -func AtgCat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { - tensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - - C.atg_cat(ptr, tensorsDataPtr, ctensorsLen, cdim) -} - -// void atg_topk(tensor *, tensor self, int64_t k, int64_t dim, int largest, int sorted); -func AtgTopk(ptr *Ctensor, self Ctensor, k int64, dim int64, largest int, sorted int) { - ck := *(*C.int64_t)(unsafe.Pointer(&k)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - clargest := *(*C.int)(unsafe.Pointer(&largest)) - csorted := *(*C.int)(unsafe.Pointer(&sorted)) - - C.atg_topk(ptr, self, ck, cdim, clargest, csorted) -} - -// void atg_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -func AtgAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { - outputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - - C.atg_adaptive_avg_pool2d(ptr, self, outputSizeDataPtr, coutputSizeLen) -} - -// void atg_softmax(tensor *, tensor self, int64_t dim, int dtype); -func AtgSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - - C.atg_softmax(ptr, self, cdim, cdtype) -} - -// void atg_constant_pad_nd(tensor *, tensor self, int64_t *pad_data, int pad_len); -func AtgConstantPadNd(ptr *Ctensor, self Ctensor, padData []int64, padLen int) { - cpadDataPtr := (*C.int64_t)(unsafe.Pointer(&padData[0])) - cpadLen := *(*C.int)(unsafe.Pointer(&padLen)) - - C.atg_constant_pad_nd(ptr, self, cpadDataPtr, cpadLen) -} - -// void atg_sigmoid(tensor *, tensor self); -func AtgSigmoid(ptr *Ctensor, self Ctensor) { - C.atg_sigmoid(ptr, self) -} - -// void atg_flip(tensor *, tensor self, int64_t *dims_data, int dims_len); -func AtgFlip(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int) { - - cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) - cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) - - C.atg_flip(ptr, self, cdimsDataPtr, cdimsLen) -} - -// void atg_reflection_pad2d(tensor *, tensor self, int64_t *padding_data, int padding_len); -func AtgReflectionPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - - C.atg_reflection_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) -} - -// void atg_arange(tensor *, scalar end, int options_kind, int options_device); -func AtgArange(ptr *Ctensor, end Cscalar, optionsKind int32, optionsDevice 
int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - - C.atg_arange(ptr, end, coptionsKind, coptionsDevice) -} - -// void atg_arange1(tensor *, scalar start, scalar end, int options_kind, int options_device); -func AtgArange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - - C.atg_arange1(ptr, start, end, coptionsKind, coptionsDevice) -} - -// void atg_arange2(tensor *, scalar start, scalar end, scalar step, int options_kind, int options_device); -func AtgArange2(ptr *Ctensor, start Cscalar, end Cscalar, step Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - - C.atg_arange2(ptr, start, end, step, coptionsKind, coptionsDevice) -} - -// void atg_arange_out(tensor *, tensor out, scalar end); -func AtgArangeOut(ptr *Ctensor, out Ctensor, end Cscalar) { - - C.atg_arange_out(ptr, out, end) -} - -// void atg_arange_out1(tensor *, tensor out, scalar start, scalar end); -func AtgArangeOut1(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar) { - - C.atg_arange_out1(ptr, out, start, end) -} - -// void atg_max1(tensor *, tensor self, tensor other); -func AtgMax1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_max1(ptr, self, other) -} - -// void atg_upsample_nearest2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w); -func AtgUpsampleNearest2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64) { - - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - - C.atg_upsample_nearest2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) -} - -// void atg_repeat(tensor *, tensor self, int64_t *repeats_data, int repeats_len); -func AtgRepeat(ptr *Ctensor, self Ctensor, repeatData []int64, repeatLen int) { - crepeatDataPtr := (*C.int64_t)(unsafe.Pointer(&repeatData[0])) - crepeatLen := *(*C.int)(unsafe.Pointer(&repeatLen)) - - C.atg_repeat(ptr, self, crepeatDataPtr, crepeatLen) -} - -// void atg_contiguous(tensor *, tensor self); -func AtgContiguous(ptr *Ctensor, self Ctensor) { - C.atg_contiguous(ptr, self) -} - -// void atg_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1); -func AtgTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { - - cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) - cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) - - C.atg_transpose(ptr, self, cdim0, cdim1) -} - -// void atg_squeeze(tensor *, tensor self); -func AtgSqueeze(ptr *Ctensor, self Ctensor) { - C.atg_squeeze(ptr, self) -} diff --git a/libtch/c-generated.go b/libtch/c-generated.go new file mode 100644 index 0000000..acd6ab3 --- /dev/null +++ b/libtch/c-generated.go @@ -0,0 +1,5487 @@ +package libtch + +// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! 
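(A brief illustrative sketch, not part of the generated file or of this patch: the wrappers that follow all share one calling convention. The first argument points at caller-owned storage that the C side fills with the resulting tensor handle(s); scalar options are reinterpreted bit-for-bit into their C counterparts through unsafe.Pointer; and slices travel as a pointer to the first element plus an int length. The helper absExample and its <stdlib.h> include below are assumptions added only for illustration, reusing AtgAbs as it appears later in this file.)

package libtch

// #include <stdlib.h>
import "C"

import "unsafe"

// absExample drives one generated wrapper end to end: reserve room for a
// single result handle, let AtgAbs write into it, and return that handle.
// Multi-output ops fill several consecutive slots behind the same pointer.
func absExample(self Ctensor) Ctensor {
	ptr := (*Ctensor)(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0)))))
	defer C.free(unsafe.Pointer(ptr))
	AtgAbs(ptr, self) // writes the new tensor handle into *ptr
	return *ptr
}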
+ +//#include "stdbool.h" +//#include "torch_api.h" +import "C" + +import "unsafe" + +func Atg__And_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___and__(ptr, self, other ) +} +func Atg__And1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___and__1(ptr, self, other) +} +func Atg__Iand_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___iand__(ptr, self, other ) +} +func Atg__Iand1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___iand__1(ptr, self, other) +} +func Atg__Ilshift_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___ilshift__(ptr, self, other ) +} +func Atg__Ilshift1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___ilshift__1(ptr, self, other) +} +func Atg__Ior_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___ior__(ptr, self, other ) +} +func Atg__Ior1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___ior__1(ptr, self, other) +} +func Atg__Irshift_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___irshift__(ptr, self, other ) +} +func Atg__Irshift1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___irshift__1(ptr, self, other) +} +func Atg__Ixor_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___ixor__(ptr, self, other ) +} +func Atg__Ixor1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___ixor__1(ptr, self, other) +} +func Atg__Lshift_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___lshift__(ptr, self, other ) +} +func Atg__Lshift1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___lshift__1(ptr, self, other) +} +func Atg__Or_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___or__(ptr, self, other ) +} +func Atg__Or1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___or__1(ptr, self, other) +} +func Atg__Rshift_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___rshift__(ptr, self, other ) +} +func Atg__Rshift1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___rshift__1(ptr, self, other) +} +func Atg__Xor_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg___xor__(ptr, self, other ) +} +func Atg__Xor1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg___xor__1(ptr, self, other) +} +func Atg_AdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg__adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func Atg_AdaptiveAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg__adaptive_avg_pool2d_backward(ptr, gradOutput, self) +} +func Atg_Addr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg__addr(ptr, self, vec1, vec2) +} +func Atg_Addr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg__addr_(ptr, self, vec1, vec2) +} +func Atg_AddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg__addr_out(ptr, out, self, vec1, vec2) +} +func Atg_AmpUpdateScale(ptr *Ctensor, growthTracker Ctensor, currentScale Ctensor, foundInf Ctensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64){ +cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor)) +cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor)) +cgrowthInterval := *(*C.int64_t)(unsafe.Pointer(&growthInterval)) +C.atg__amp_update_scale(ptr, growthTracker, currentScale, foundInf, cscaleGrowthFactor, cscaleBackoffFactor, cgrowthInterval) +} +func Atg_BaddbmmMkl_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg__baddbmm_mkl_(ptr, self, batch1, 
batch2) +} +func Atg_CastByte(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_byte(ptr, self, cnonBlocking) +} +func Atg_CastChar(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_char(ptr, self, cnonBlocking) +} +func Atg_CastDouble(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_double(ptr, self, cnonBlocking) +} +func Atg_CastFloat(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_float(ptr, self, cnonBlocking) +} +func Atg_CastHalf(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_half(ptr, self, cnonBlocking) +} +func Atg_CastInt(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_int(ptr, self, cnonBlocking) +} +func Atg_CastLong(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_long(ptr, self, cnonBlocking) +} +func Atg_CastShort(ptr *Ctensor, self Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__cast_short(ptr, self, cnonBlocking) +} +func Atg_Cat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) +} +func Atg_CatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +} +func Atg_CdistBackward(ptr *Ctensor, grad Ctensor, x1 Ctensor, x2 Ctensor, p float64, cdist Ctensor){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg__cdist_backward(ptr, grad, x1, x2, cp, cdist) +} +func Atg_CholeskyHelper(ptr *Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg__cholesky_helper(ptr, self, cupper) +} +func Atg_CholeskySolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg__cholesky_solve_helper(ptr, self, a, cupper) +} +func Atg_Coalesced_(ptr *Ctensor, self Ctensor, coalesced int32){ +ccoalesced := *(*C.int)(unsafe.Pointer(&coalesced)) +C.atg__coalesced_(ptr, self, ccoalesced) +} +func Atg_Convolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := 
*(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) +C.atg__convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cbenchmark, cdeterministic, ccudnnEnabled) +} +func Atg_ConvolutionNogroup(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +C.atg__convolution_nogroup(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen) +} +func Atg_CopyFrom(ptr *Ctensor, self Ctensor, dst Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg__copy_from(ptr, self, dst, cnonBlocking) +} +func Atg_CtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, zeroInfinity int32){ +cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) +cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) +ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) +ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) +cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) +czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) +C.atg__ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, czeroInfinity) +} +func Atg_CtcLossBackward(ptr *Ctensor, grad Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, negLogLikelihood Ctensor, logAlpha Ctensor, blank int64, zeroInfinity int32){ +cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) +cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) +ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) +ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) +cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) +czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) +C.atg__ctc_loss_backward(ptr, grad, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, negLogLikelihood, logAlpha, cblank, czeroInfinity) +} +func Atg_CudnnCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, deterministic int32, zeroInfinity int32){ 
+cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) +cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) +ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) +ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) +cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) +C.atg__cudnn_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, cdeterministic, czeroInfinity) +} +func Atg_CudnnInitDropoutState(ptr *Ctensor, dropout float64, train int32, dropoutSeed int64, optionsKind int32, optionsDevice int32){ +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cdropoutSeed := *(*C.int64_t)(unsafe.Pointer(&dropoutSeed)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__cudnn_init_dropout_state(ptr, cdropout, ctrain, cdropoutSeed, coptionsKind, coptionsDevice) +} +func Atg_CudnnRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, weightBuf Ctensor, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){ +cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) +cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) +cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) +cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) +C.atg__cudnn_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, weightBuf, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) +} +func Atg_CudnnRnnFlattenWeight(ptr *Ctensor, weightArrData []Ctensor, weightArrLen int, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, bidirectional int32){ +cweightArrDataPtr := (*Ctensor)(unsafe.Pointer(&weightArrData[0])) +cweightArrLen := *(*C.int)(unsafe.Pointer(&weightArrLen)) +cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) +cinputSize := *(*C.int64_t)(unsafe.Pointer(&inputSize)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +C.atg__cudnn_rnn_flatten_weight(ptr, cweightArrDataPtr, cweightArrLen, cweightStride0, cinputSize, cmode, chiddenSize, cnumLayers, cbatchFirst, cbidirectional) +} +func Atg_Cumprod(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__cumprod(ptr, self, cdim) +} +func Atg_CumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) 
+C.atg__cumprod_out(ptr, out, self, cdim) +} +func Atg_Cumsum(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__cumsum(ptr, self, cdim) +} +func Atg_CumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__cumsum_out(ptr, out, self, cdim) +} +func Atg_DimArange(ptr *Ctensor, like Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__dim_arange(ptr, like, cdim) +} +func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor){ +C.atg__dirichlet_grad(ptr, x, alpha, total) +} +func Atg_EmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){ +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) +C.atg__embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) +} +func Atg_EmbeddingBagBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +C.atg__embedding_bag_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, csparse, perSampleWeights) +} +func Atg_EmbeddingBagDenseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +C.atg__embedding_bag_dense_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) +} +func Atg_EmbeddingBagPerSampleWeightsBackward(ptr *Ctensor, grad Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, mode int64){ +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +C.atg__embedding_bag_per_sample_weights_backward(ptr, grad, weight, indices, offsets, offset2bag, cmode) +} +func Atg_EmbeddingBagSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +C.atg__embedding_bag_sparse_backward(ptr, grad, indices, offsets, offset2bag, bagSize, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) +} +func Atg_EmptyAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32, scale float64, zeroPoint int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := 
*(*C.int)(unsafe.Pointer(&optionsDevice)) +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +C.atg__empty_affine_quantized(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice, cscale, czeroPoint) +} +func Atg_EmptyPerChannelAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, scales Ctensor, zeroPoints Ctensor, axis int64, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__empty_per_channel_affine_quantized(ptr, csizeDataPtr, csizeLen, scales, zeroPoints, caxis, coptionsKind, coptionsDevice) +} +func Atg_FftWithSize(ptr *Ctensor, self Ctensor, signalNdim int64, complexInput int32, complexOutput int32, inverse int32, checkedSignalSizesData []int64, checkedSignalSizesLen int, normalized int32, onesided int32, outputSizesData []int64, outputSizesLen int){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +ccomplexInput := *(*C.int)(unsafe.Pointer(&complexInput)) +ccomplexOutput := *(*C.int)(unsafe.Pointer(&complexOutput)) +cinverse := *(*C.int)(unsafe.Pointer(&inverse)) +ccheckedSignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&checkedSignalSizesData[0])) +ccheckedSignalSizesLen := *(*C.int)(unsafe.Pointer(&checkedSignalSizesLen)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +coutputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizesData[0])) +coutputSizesLen := *(*C.int)(unsafe.Pointer(&outputSizesLen)) +C.atg__fft_with_size(ptr, self, csignalNdim, ccomplexInput, ccomplexOutput, cinverse, ccheckedSignalSizesDataPtr, ccheckedSignalSizesLen, cnormalized, conesided, coutputSizesDataPtr, coutputSizesLen) +} +func Atg_FusedDropout(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg__fused_dropout(ptr, self, cp) +} +func Atg_GatherSparseBackward(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, grad Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__gather_sparse_backward(ptr, self, cdim, index, grad) +} +func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__index_copy_(ptr, self, cdim, index, source) +} +func Atg_IndexPutImpl_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32, unsafety int32){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) +cunsafety := *(*C.int)(unsafe.Pointer(&unsafety)) +C.atg__index_put_impl_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate, cunsafety) +} +func Atg_Indices(ptr *Ctensor, self Ctensor){ +C.atg__indices(ptr, self) +} +func Atg_InverseHelper(ptr *Ctensor, self Ctensor){ +C.atg__inverse_helper(ptr, self) +} +func Atg_LogSoftmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) +C.atg__log_softmax(ptr, self, cdim, chalfToFloat) +} +func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__log_softmax_backward_data(ptr, 
gradOutput, output, cdim, self) +} +func Atg_LuSolveHelper(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ +C.atg__lu_solve_helper(ptr, self, lUData, lUPivots) +} +func Atg_LuWithInfo(ptr *Ctensor, self Ctensor, pivot int32, checkErrors int32){ +cpivot := *(*C.int)(unsafe.Pointer(&pivot)) +ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors)) +C.atg__lu_with_info(ptr, self, cpivot, ccheckErrors) +} +func Atg_MakePerChannelQuantizedTensor(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +C.atg__make_per_channel_quantized_tensor(ptr, self, scale, zeroPoint, caxis) +} +func Atg_MakePerTensorQuantizedTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +C.atg__make_per_tensor_quantized_tensor(ptr, self, cscale, czeroPoint) +} +func Atg_MaskedScale(ptr *Ctensor, self Ctensor, mask Ctensor, scale float64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +C.atg__masked_scale(ptr, self, mask, cscale) +} +func Atg_Max(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__max(ptr, self, cdim, ckeepdim) +} +func Atg_MaxOut(ptr *Ctensor, max Ctensor, maxIndices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__max_out(ptr, max, maxIndices, self, cdim, ckeepdim) +} +func Atg_Min(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__min(ptr, self, cdim, ckeepdim) +} +func Atg_MinOut(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__min_out(ptr, min, minIndices, self, cdim, ckeepdim) +} +func Atg_MkldnnReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){ +cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) +cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) +C.atg__mkldnn_reshape(ptr, self, cshapeDataPtr, cshapeLen) +} +func Atg_MkldnnTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){ +cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +C.atg__mkldnn_transpose(ptr, self, cdim0, cdim1) +} +func Atg_MkldnnTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){ +cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1) +} +func Atg_Mode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__mode(ptr, self, cdim, ckeepdim) +} +func Atg_ModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__mode_out(ptr, values, indices, self, cdim, ckeepdim) +} +func Atg_MultinomialAliasDraw(ptr *Ctensor, j Ctensor, q Ctensor, numSamples int64){ +cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) +C.atg__multinomial_alias_draw(ptr, j, q, cnumSamples) +} +func Atg_MultinomialAliasSetup(ptr *Ctensor, probs Ctensor){ +C.atg__multinomial_alias_setup(ptr, probs) +} 
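(Another illustrative sketch, not part of the patch: boolean switches such as keepdim, sorted or pivot surface in these signatures as int32 and are reinterpreted as C.int, and reductions like atg__max appear to return two tensors by filling consecutive slots behind the out pointer. boolToInt32 and maxAlongDim below are hypothetical helpers written under those assumptions.)

package libtch

// boolToInt32 maps a Go bool onto the 0/1 int32 flag these wrappers expect.
func boolToInt32(b bool) int32 {
	if b {
		return 1
	}
	return 0
}

// maxAlongDim reserves two consecutive result slots because Atg_Max (atg__max)
// is assumed to yield both the reduced values and the argmax indices, then
// forwards the dim and keepdim options in the converted form shown above.
func maxAlongDim(self Ctensor, dim int64, keepdim bool) (values, indices Ctensor) {
	out := make([]Ctensor, 2) // caller-owned handles filled by the C side
	Atg_Max(&out[0], self, dim, boolToInt32(keepdim))
	return out[0], out[1]
}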
+func Atg_NnpackSpatialConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg__nnpack_spatial_convolution(ptr, input, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func Atg_NnpackSpatialConvolutionBackwardInput(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg__nnpack_spatial_convolution_backward_input(ptr, input, gradOutput, weight, cpaddingDataPtr, cpaddingLen) +} +func Atg_NnpackSpatialConvolutionBackwardWeight(ptr *Ctensor, input Ctensor, weightsizeData []int64, weightsizeLen int, gradOutput Ctensor, paddingData []int64, paddingLen int){ +cweightsizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightsizeData[0])) +cweightsizeLen := *(*C.int)(unsafe.Pointer(&weightsizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen) +} +func Atg_PackPaddedSequence(ptr *Ctensor, input Ctensor, lengths Ctensor, batchFirst int32){ +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +C.atg__pack_padded_sequence(ptr, input, lengths, cbatchFirst) +} +func Atg_PackPaddedSequenceBackward(ptr *Ctensor, grad Ctensor, inputSizeData []int64, inputSizeLen int, batchSizes Ctensor, batchFirst int32){ +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +C.atg__pack_padded_sequence_backward(ptr, grad, cinputSizeDataPtr, cinputSizeLen, batchSizes, cbatchFirst) +} +func Atg_PadPackedSequence(ptr *Ctensor, data Ctensor, batchSizes Ctensor, batchFirst int32, paddingValue Cscalar, totalLength int64){ +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +ctotalLength := *(*C.int64_t)(unsafe.Pointer(&totalLength)) +C.atg__pad_packed_sequence(ptr, data, batchSizes, cbatchFirst, paddingValue , ctotalLength) +} +func Atg_PdistBackward(ptr *Ctensor, grad Ctensor, self Ctensor, p float64, pdist Ctensor){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg__pdist_backward(ptr, grad, self, cp, pdist) +} +func Atg_QrHelper(ptr *Ctensor, self Ctensor, some int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +C.atg__qr_helper(ptr, self, csome) +} +func Atg_ReshapeFromTensor(ptr *Ctensor, self Ctensor, shape Ctensor){ +C.atg__reshape_from_tensor(ptr, self, shape) +} +func Atg_SWhere(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){ +C.atg__s_where(ptr, condition, self, other) +} +func Atg_SampleDirichlet(ptr *Ctensor, self Ctensor){ +C.atg__sample_dirichlet(ptr, self) +} +func Atg_ShapeAsTensor(ptr *Ctensor, self Ctensor){ +C.atg__shape_as_tensor(ptr, self) +} +func Atg_SobolEngineDraw(ptr *Ctensor, quasi Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64, dtype int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) 
+cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sobol_engine_draw(ptr, quasi, cn, sobolstate, cdimension, cnumGenerated, cdtype) +} +func Atg_SobolEngineFf_(ptr *Ctensor, self Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) +C.atg__sobol_engine_ff_(ptr, self, cn, sobolstate, cdimension, cnumGenerated) +} +func Atg_SobolEngineInitializeState_(ptr *Ctensor, self Ctensor, dimension int64){ +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +C.atg__sobol_engine_initialize_state_(ptr, self, cdimension) +} +func Atg_SobolEngineScramble_(ptr *Ctensor, self Ctensor, ltm Ctensor, dimension int64){ +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +C.atg__sobol_engine_scramble_(ptr, self, ltm, cdimension) +} +func Atg_Softmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) +C.atg__softmax(ptr, self, cdim, chalfToFloat) +} +func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, self) +} +func Atg_SolveHelper(ptr *Ctensor, self Ctensor, a Ctensor){ +C.atg__solve_helper(ptr, self, a) +} +func Atg_SparseAddmm(ptr *Ctensor, self Ctensor, sparse Ctensor, dense Ctensor){ +C.atg__sparse_addmm(ptr, self, sparse, dense) +} +func Atg_SparseCooTensorUnsafe(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__sparse_coo_tensor_unsafe(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func Atg_SparseCooTensorWithDims(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__sparse_coo_tensor_with_dims(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func Atg_SparseCooTensorWithDimsAndTensors(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){ +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__sparse_coo_tensor_with_dims_and_tensors(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, indices, values, coptionsKind, coptionsDevice) +} +func Atg_SparseMm(ptr *Ctensor, sparse Ctensor, dense Ctensor){ +C.atg__sparse_mm(ptr, sparse, dense) +} +func Atg_SparseSum(ptr *Ctensor, self Ctensor){ 
+C.atg__sparse_sum(ptr, self) +} +func Atg_SparseSum1(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sparse_sum1(ptr, self, cdtype) +} +func Atg_SparseSum2(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +C.atg__sparse_sum2(ptr, self, cdimDataPtr, cdimLen) +} +func Atg_SparseSum3(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sparse_sum3(ptr, self, cdimDataPtr, cdimLen, cdtype) +} +func Atg_SparseSumBackward(ptr *Ctensor, grad Ctensor, self Ctensor, dimData []int64, dimLen int){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +C.atg__sparse_sum_backward(ptr, grad, self, cdimDataPtr, cdimLen) +} +func Atg_StandardGamma(ptr *Ctensor, self Ctensor){ +C.atg__standard_gamma(ptr, self) +} +func Atg_StandardGammaGrad(ptr *Ctensor, self Ctensor, output Ctensor){ +C.atg__standard_gamma_grad(ptr, self, output) +} +func Atg_Std(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg__std(ptr, self, cunbiased) +} +func Atg_SvdHelper(ptr *Ctensor, self Ctensor, some int32, computeUv int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) +C.atg__svd_helper(ptr, self, csome, ccomputeUv) +} +func Atg_SymeigHelper(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg__symeig_helper(ptr, self, ceigenvectors, cupper) +} +func Atg_TriangularSolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) +C.atg__triangular_solve_helper(ptr, self, a, cupper, ctranspose, cunitriangular) +} +func Atg_Trilinear(ptr *Ctensor, i1 Ctensor, i2 Ctensor, i3 Ctensor, expand1Data []int64, expand1Len int, expand2Data []int64, expand2Len int, expand3Data []int64, expand3Len int, sumdimData []int64, sumdimLen int, unrollDim int64){ +cexpand1DataPtr := (*C.int64_t)(unsafe.Pointer(&expand1Data[0])) +cexpand1Len := *(*C.int)(unsafe.Pointer(&expand1Len)) +cexpand2DataPtr := (*C.int64_t)(unsafe.Pointer(&expand2Data[0])) +cexpand2Len := *(*C.int)(unsafe.Pointer(&expand2Len)) +cexpand3DataPtr := (*C.int64_t)(unsafe.Pointer(&expand3Data[0])) +cexpand3Len := *(*C.int)(unsafe.Pointer(&expand3Len)) +csumdimDataPtr := (*C.int64_t)(unsafe.Pointer(&sumdimData[0])) +csumdimLen := *(*C.int)(unsafe.Pointer(&sumdimLen)) +cunrollDim := *(*C.int64_t)(unsafe.Pointer(&unrollDim)) +C.atg__trilinear(ptr, i1, i2, i3, cexpand1DataPtr, cexpand1Len, cexpand2DataPtr, cexpand2Len, cexpand3DataPtr, cexpand3Len, csumdimDataPtr, csumdimLen, cunrollDim) +} +func Atg_Unique(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32){ +csorted := *(*C.int)(unsafe.Pointer(&sorted)) +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +C.atg__unique(ptr, self, csorted, creturnInverse) +} +func Atg_Unique2(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32, returnCounts int32){ +csorted := *(*C.int)(unsafe.Pointer(&sorted)) +creturnInverse := 
*(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +C.atg__unique2(ptr, self, csorted, creturnInverse, creturnCounts) +} +func Atg_UnsafeView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen) +} +func Atg_Values(ptr *Ctensor, self Ctensor){ +C.atg__values(ptr, self) +} +func Atg_Var(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg__var(ptr, self, cunbiased) +} +func Atg_WeightNorm(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm(ptr, v, g, cdim) +} +func Atg_WeightNormCudaInterface(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm_cuda_interface(ptr, v, g, cdim) +} +func Atg_WeightNormCudaInterfaceBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm_cuda_interface_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) +} +func Atg_WeightNormDifferentiableBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm_differentiable_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) +} +func AtgAbs(ptr *Ctensor, self Ctensor){ +C.atg_abs(ptr, self) +} +func AtgAbs_(ptr *Ctensor, self Ctensor){ +C.atg_abs_(ptr, self) +} +func AtgAbsOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_abs_out(ptr, out, self) +} +func AtgAcos(ptr *Ctensor, self Ctensor){ +C.atg_acos(ptr, self) +} +func AtgAcos_(ptr *Ctensor, self Ctensor){ +C.atg_acos_(ptr, self) +} +func AtgAcosOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_acos_out(ptr, out, self) +} +func AtgAdaptiveAvgPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveAvgPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_adaptive_avg_pool3d_backward(ptr, gradOutput, self) +} +func AtgAdaptiveAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_adaptive_avg_pool3d_backward_out(ptr, gradInput, 
gradOutput, self) +} +func AtgAdaptiveAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveMaxPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveMaxPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool2d_backward(ptr, gradOutput, self, indices) +} +func AtgAdaptiveMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, indices) +} +func AtgAdaptiveMaxPool2dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool2d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveMaxPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdaptiveMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool3d_backward(ptr, gradOutput, self, indices) +} +func AtgAdaptiveMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, indices) +} +func AtgAdaptiveMaxPool3dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool3d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgAdd(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_add(ptr, self, other) +} +func AtgAdd1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_add1(ptr, self, other ) +} +func AtgAdd_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_add_(ptr, self, other) +} +func AtgAdd1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_add_1(ptr, self, other ) +} +func AtgAddOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_add_out(ptr, out, self, other) +} +func AtgAddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_addbmm(ptr, self, batch1, batch2) +} +func AtgAddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_addbmm_(ptr, self, batch1, batch2) +} +func AtgAddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 
Ctensor, batch2 Ctensor){ +C.atg_addbmm_out(ptr, out, self, batch1, batch2) +} +func AtgAddcdiv(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcdiv(ptr, self, tensor1, tensor2) +} +func AtgAddcdiv_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcdiv_(ptr, self, tensor1, tensor2) +} +func AtgAddcdivOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcdiv_out(ptr, out, self, tensor1, tensor2) +} +func AtgAddcmul(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcmul(ptr, self, tensor1, tensor2) +} +func AtgAddcmul_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcmul_(ptr, self, tensor1, tensor2) +} +func AtgAddcmulOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcmul_out(ptr, out, self, tensor1, tensor2) +} +func AtgAddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_addmm(ptr, self, mat1, mat2) +} +func AtgAddmm_(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_addmm_(ptr, self, mat1, mat2) +} +func AtgAddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_addmm_out(ptr, out, self, mat1, mat2) +} +func AtgAddmv(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ +C.atg_addmv(ptr, self, mat, vec) +} +func AtgAddmv_(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ +C.atg_addmv_(ptr, self, mat, vec) +} +func AtgAddmvOut(ptr *Ctensor, out Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ +C.atg_addmv_out(ptr, out, self, mat, vec) +} +func AtgAddr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg_addr(ptr, self, vec1, vec2) +} +func AtgAddr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg_addr_(ptr, self, vec1, vec2) +} +func AtgAddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg_addr_out(ptr, out, self, vec1, vec2) +} +func AtgAffineGridGenerator(ptr *Ctensor, theta Ctensor, sizeData []int64, sizeLen int, alignCorners int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_affine_grid_generator(ptr, theta, csizeDataPtr, csizeLen, calignCorners) +} +func AtgAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, sizeData []int64, sizeLen int, alignCorners int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_affine_grid_generator_backward(ptr, grad, csizeDataPtr, csizeLen, calignCorners) +} +func AtgAlias(ptr *Ctensor, self Ctensor){ +C.atg_alias(ptr, self) +} +func AtgAlignAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_align_as(ptr, self, other) +} + +func AtgAll(ptr *Ctensor, self Ctensor){ +C.atg_all(ptr, self) +} +func AtgAll1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_all1(ptr, self, cdim, ckeepdim) +} +func AtgAllOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_all_out(ptr, out, self, cdim, ckeepdim) +} +func AtgAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_alpha_dropout(ptr, 
input, cp, ctrain) +} +func AtgAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_alpha_dropout_(ptr, self, cp, ctrain) +} +func AtgAngle(ptr *Ctensor, self Ctensor){ +C.atg_angle(ptr, self) +} +func AtgAngleOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_angle_out(ptr, out, self) +} +func AtgAny(ptr *Ctensor, self Ctensor){ +C.atg_any(ptr, self) +} +func AtgAny1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_any1(ptr, self, cdim, ckeepdim) +} +func AtgAnyOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_any_out(ptr, out, self, cdim, ckeepdim) +} +func AtgArange(ptr *Ctensor, end Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_arange(ptr, end , coptionsKind, coptionsDevice) +} +func AtgArange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_arange1(ptr, start , end , coptionsKind, coptionsDevice) +} +func AtgArange2(ptr *Ctensor, start Cscalar, end Cscalar, step Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_arange2(ptr, start , end , step , coptionsKind, coptionsDevice) +} +func AtgArangeOut(ptr *Ctensor, out Ctensor, end Cscalar){ +C.atg_arange_out(ptr, out, end ) +} +func AtgArangeOut1(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){ +C.atg_arange_out1(ptr, out, start , end ) +} +func AtgArgmax(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_argmax(ptr, self, cdim, ckeepdim) +} +func AtgArgmin(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_argmin(ptr, self, cdim, ckeepdim) +} +func AtgArgsort(ptr *Ctensor, self Ctensor, dim int64, descending int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdescending := *(*C.int)(unsafe.Pointer(&descending)) +C.atg_argsort(ptr, self, cdim, cdescending) +} +func AtgAsStrided(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset)) +C.atg_as_strided(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) +} +func AtgAsStrided_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cstorageOffset := 
*(*C.int64_t)(unsafe.Pointer(&storageOffset)) +C.atg_as_strided_(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) +} +func AtgAsin(ptr *Ctensor, self Ctensor){ +C.atg_asin(ptr, self) +} +func AtgAsin_(ptr *Ctensor, self Ctensor){ +C.atg_asin_(ptr, self) +} +func AtgAsinOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_asin_out(ptr, out, self) +} +func AtgAtan(ptr *Ctensor, self Ctensor){ +C.atg_atan(ptr, self) +} +func AtgAtan2(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_atan2(ptr, self, other) +} +func AtgAtan2_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_atan2_(ptr, self, other) +} +func AtgAtan2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_atan2_out(ptr, out, self, other) +} +func AtgAtan_(ptr *Ctensor, self Ctensor){ +C.atg_atan_(ptr, self) +} +func AtgAtanOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_atan_out(ptr, out, self) +} +func AtgAvgPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +C.atg_avg_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad) +} +func AtgAvgPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) +C.atg_avg_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) 
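// Annotation (not part of the generated diff): with every option reinterpreted in place as its C counterpart (int32 -> C.int, int64 -> C.int64_t), the wrapper makes a single cgo call; tensor arguments pass through untouched as opaque Ctensor handles.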
+C.atg_avg_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) +C.atg_avg_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) +C.atg_avg_pool2d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) +C.atg_avg_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) 
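// Annotation (not part of the generated diff): each []int64 parameter is marshalled as a pointer to its first element plus a separate length argument, matching the int64_t*/int pairs in the corresponding atg_* C signature.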
+cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) +C.atg_avg_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) +C.atg_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) +C.atg_avg_pool3d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) +} +func AtgBaddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_baddbmm(ptr, self, batch1, batch2) +} +func AtgBaddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_baddbmm_(ptr, self, batch1, batch2) +} +func AtgBaddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_baddbmm_out(ptr, out, self, batch1, batch2) +} +func AtgBartlettWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_bartlett_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgBartlettWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ +cwindowLength := 
*(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_bartlett_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64, cudnnEnabled int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) +C.atg_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps, ccudnnEnabled) +} +func AtgBatchNormBackwardElemt(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, meanDy Ctensor, meanDyXmu Ctensor){ +C.atg_batch_norm_backward_elemt(ptr, gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) +} +func AtgBatchNormBackwardReduce(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, inputG int32, weightG int32, biasG int32){ +cinputG := *(*C.int)(unsafe.Pointer(&inputG)) +cweightG := *(*C.int)(unsafe.Pointer(&weightG)) +cbiasG := *(*C.int)(unsafe.Pointer(&biasG)) +C.atg_batch_norm_backward_reduce(ptr, gradOut, input, mean, invstd, weight, cinputG, cweightG, cbiasG) +} +func AtgBatchNormElemt(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_batch_norm_elemt(ptr, input, weight, bias, mean, invstd, ceps) +} +func AtgBatchNormElemtOut(ptr *Ctensor, out Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_batch_norm_elemt_out(ptr, out, input, weight, bias, mean, invstd, ceps) +} +func AtgBatchNormGatherStats(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, count int64){ +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccount := *(*C.int64_t)(unsafe.Pointer(&count)) +C.atg_batch_norm_gather_stats(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccount) +} +func AtgBatchNormGatherStatsWithCounts(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, countsData []int64, countsLen int){ +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccountsDataPtr := (*C.int64_t)(unsafe.Pointer(&countsData[0])) +ccountsLen := *(*C.int)(unsafe.Pointer(&countsLen)) +C.atg_batch_norm_gather_stats_with_counts(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccountsDataPtr, ccountsLen) +} +func AtgBatchNormStats(ptr *Ctensor, input Ctensor, eps float64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_batch_norm_stats(ptr, input, ceps) +} +func AtgBatchNormUpdateStats(ptr *Ctensor, input Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64){ +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +C.atg_batch_norm_update_stats(ptr, input, runningMean, runningVar, cmomentum) +} +func AtgBernoulli(ptr *Ctensor, self Ctensor){ +C.atg_bernoulli(ptr, self) +} +func AtgBernoulli1(ptr *Ctensor, self Ctensor, p float64){ +cp := 
*(*C.double)(unsafe.Pointer(&p)) +C.atg_bernoulli1(ptr, self, cp) +} +func AtgBernoulli_(ptr *Ctensor, self Ctensor, p Ctensor){ +C.atg_bernoulli_(ptr, self, p) +} +func AtgBernoulli1_(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg_bernoulli_1(ptr, self, cp) +} +func AtgBernoulliOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_bernoulli_out(ptr, out, self) +} +func AtgBilinear(ptr *Ctensor, input1 Ctensor, input2 Ctensor, weight Ctensor, bias Ctensor){ +C.atg_bilinear(ptr, input1, input2, weight, bias) +} +func AtgBinaryCrossEntropy(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy(ptr, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_backward(ptr, gradOutput, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_out(ptr, out, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyWithLogits(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_with_logits(ptr, self, target, weight, posWeight, creduction) +} +func AtgBinaryCrossEntropyWithLogitsBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_with_logits_backward(ptr, gradOutput, self, target, weight, posWeight, creduction) +} +func AtgBincount(ptr *Ctensor, self Ctensor, weights Ctensor, minlength int64){ +cminlength := *(*C.int64_t)(unsafe.Pointer(&minlength)) +C.atg_bincount(ptr, self, weights, cminlength) +} +func AtgBitwiseAnd(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_and(ptr, self, other ) +} +func AtgBitwiseAnd1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_and1(ptr, self, other) +} +func AtgBitwiseAnd_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_and_(ptr, self, other ) +} +func AtgBitwiseAnd1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_and_1(ptr, self, other) +} +func AtgBitwiseAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_and_out(ptr, out, self, other) +} +func AtgBitwiseAndOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_and_out1(ptr, out, self, other ) +} +func AtgBitwiseNot(ptr *Ctensor, self Ctensor){ +C.atg_bitwise_not(ptr, self) +} +func AtgBitwiseNot_(ptr *Ctensor, self Ctensor){ +C.atg_bitwise_not_(ptr, self) +} +func AtgBitwiseNotOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_bitwise_not_out(ptr, out, self) +} +func AtgBitwiseOr(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_or(ptr, self, other ) +} +func AtgBitwiseOr1(ptr *Ctensor, 
self Ctensor, other Ctensor){ +C.atg_bitwise_or1(ptr, self, other) +} +func AtgBitwiseOr_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_or_(ptr, self, other ) +} +func AtgBitwiseOr1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_or_1(ptr, self, other) +} +func AtgBitwiseOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_or_out(ptr, out, self, other) +} +func AtgBitwiseOrOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_or_out1(ptr, out, self, other ) +} +func AtgBitwiseXor(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_xor(ptr, self, other ) +} +func AtgBitwiseXor1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_xor1(ptr, self, other) +} +func AtgBitwiseXor_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_xor_(ptr, self, other ) +} +func AtgBitwiseXor1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_xor_1(ptr, self, other) +} +func AtgBitwiseXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_xor_out(ptr, out, self, other) +} +func AtgBitwiseXorOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_xor_out1(ptr, out, self, other ) +} +func AtgBlackmanWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_blackman_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgBlackmanWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_blackman_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgBmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_bmm(ptr, self, mat2) +} +func AtgBmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_bmm_out(ptr, out, self, mat2) +} + +func AtgCartesianProd(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_cartesian_prod(ptr, ctensorsDataPtr, ctensorsLen) +} +func AtgCat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgCatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgCauchy_(ptr *Ctensor, self Ctensor, median float64, sigma float64){ +cmedian := *(*C.double)(unsafe.Pointer(&median)) +csigma := *(*C.double)(unsafe.Pointer(&sigma)) +C.atg_cauchy_(ptr, self, cmedian, csigma) +} +func AtgCdist(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, computeMode int64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ccomputeMode := *(*C.int64_t)(unsafe.Pointer(&computeMode)) +C.atg_cdist(ptr, x1, x2, cp, ccomputeMode) +} +func 
AtgCeil(ptr *Ctensor, self Ctensor){ +C.atg_ceil(ptr, self) +} +func AtgCeil_(ptr *Ctensor, self Ctensor){ +C.atg_ceil_(ptr, self) +} +func AtgCeilOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_ceil_out(ptr, out, self) +} +func AtgCelu(ptr *Ctensor, self Ctensor){ +C.atg_celu(ptr, self) +} +func AtgCelu_(ptr *Ctensor, self Ctensor){ +C.atg_celu_(ptr, self) +} +func AtgChainMatmul(ptr *Ctensor, matricesData []Ctensor, matricesLen int){ +cmatricesDataPtr := (*Ctensor)(unsafe.Pointer(&matricesData[0])) +cmatricesLen := *(*C.int)(unsafe.Pointer(&matricesLen)) +C.atg_chain_matmul(ptr, cmatricesDataPtr, cmatricesLen) +} +func AtgCholesky(ptr *Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky(ptr, self, cupper) +} +func AtgCholeskyInverse(ptr *Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_inverse(ptr, self, cupper) +} +func AtgCholeskyInverseOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_inverse_out(ptr, out, self, cupper) +} +func AtgCholeskyOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_out(ptr, out, self, cupper) +} +func AtgCholeskySolve(ptr *Ctensor, self Ctensor, input2 Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_solve(ptr, self, input2, cupper) +} +func AtgCholeskySolveOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_solve_out(ptr, out, self, input2, cupper) +} + +func AtgClamp(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clamp(ptr, self, min , max ) +} +func AtgClamp_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clamp_(ptr, self, min , max ) +} +func AtgClampMax(ptr *Ctensor, self Ctensor, max Cscalar){ +C.atg_clamp_max(ptr, self, max ) +} +func AtgClampMax_(ptr *Ctensor, self Ctensor, max Cscalar){ +C.atg_clamp_max_(ptr, self, max ) +} +func AtgClampMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, max Cscalar){ +C.atg_clamp_max_out(ptr, out, self, max ) +} +func AtgClampMin(ptr *Ctensor, self Ctensor, min Cscalar){ +C.atg_clamp_min(ptr, self, min ) +} +func AtgClampMin_(ptr *Ctensor, self Ctensor, min Cscalar){ +C.atg_clamp_min_(ptr, self, min ) +} +func AtgClampMinOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar){ +C.atg_clamp_min_out(ptr, out, self, min ) +} +func AtgClampOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clamp_out(ptr, out, self, min , max ) +} +func AtgCoalesce(ptr *Ctensor, self Ctensor){ +C.atg_coalesce(ptr, self) +} +func AtgCol2im(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) 
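// Annotation (not part of the generated diff): length values (Go int) are reinterpreted through unsafe.Pointer as C.int rather than converted with a plain C.int(...) cast; the generator emits this same pattern for every scalar argument in the file.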
+cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im(ptr, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imBackward(ptr *Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im_backward(ptr, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im_backward_out(ptr, gradInput, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCombinations(ptr *Ctensor, self Ctensor, r int64, withReplacement int32){ +cr := *(*C.int64_t)(unsafe.Pointer(&r)) +cwithReplacement := *(*C.int)(unsafe.Pointer(&withReplacement)) +C.atg_combinations(ptr, self, cr, cwithReplacement) +} +func AtgConj(ptr *Ctensor, self Ctensor){ +C.atg_conj(ptr, self) +} +func AtgConjOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_conj_out(ptr, out, self) +} +func 
AtgConstantPadNd(ptr *Ctensor, self Ctensor, padData []int64, padLen int){ +cpadDataPtr := (*C.int64_t)(unsafe.Pointer(&padData[0])) +cpadLen := *(*C.int)(unsafe.Pointer(&padLen)) +C.atg_constant_pad_nd(ptr, self, cpadDataPtr, cpadLen) +} +func AtgContiguous(ptr *Ctensor, self Ctensor){ +C.atg_contiguous(ptr, self) +} +func AtgConv1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_conv1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConv2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_conv2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConv3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_conv3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConvTbc(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, pad int64){ +cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) +C.atg_conv_tbc(ptr, self, weight, bias, cpad) +} +func AtgConvTbcBackward(ptr *Ctensor, self Ctensor, input Ctensor, weight Ctensor, bias Ctensor, pad int64){ +cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) +C.atg_conv_tbc_backward(ptr, self, input, weight, bias, cpad) +} +func AtgConvTranspose1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_conv_transpose1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvTranspose2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_conv_transpose2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvTranspose3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_conv_transpose3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, 
coutputPaddingDataPtr, coutputPaddingLen, cgroups) +} +func AtgConvolutionOverrideable(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_convolution_overrideable(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) +} +func AtgCopySparseToSparse_(ptr *Ctensor, self Ctensor, src Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg_copy_sparse_to_sparse_(ptr, self, src, cnonBlocking) +} +func AtgCos(ptr *Ctensor, self Ctensor){ +C.atg_cos(ptr, self) +} +func AtgCos_(ptr *Ctensor, self Ctensor){ +C.atg_cos_(ptr, self) +} +func AtgCosOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_cos_out(ptr, out, self) +} +func AtgCosh(ptr *Ctensor, self Ctensor){ +C.atg_cosh(ptr, self) +} +func AtgCosh_(ptr *Ctensor, self Ctensor){ +C.atg_cosh_(ptr, self) +} +func AtgCoshOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_cosh_out(ptr, out, self) +} +func AtgCosineEmbeddingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_cosine_embedding_loss(ptr, input1, input2, target, cmargin, creduction) +} +func AtgCosineSimilarity(ptr *Ctensor, x1 Ctensor, x2 Ctensor, dim int64, eps float64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_cosine_similarity(ptr, x1, x2, cdim, ceps) +} +func AtgCross(ptr *Ctensor, self Ctensor, other Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cross(ptr, self, other, cdim) +} +func AtgCrossOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cross_out(ptr, out, self, other, cdim) +} +func AtgCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, reduction int64, zeroInfinity int32){ +cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) +cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) +ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) +ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) +cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) +C.atg_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, creduction, czeroInfinity) +} +func 
AtgCtcLoss1(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengths Ctensor, targetLengths Ctensor, blank int64, reduction int64, zeroInfinity int32){ +cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) +C.atg_ctc_loss1(ptr, logProbs, targets, inputLengths, targetLengths, cblank, creduction, czeroInfinity) +} +func AtgCudnnAffineGridGenerator(ptr *Ctensor, theta Ctensor, n int64, c int64, h int64, w int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cc := *(*C.int64_t)(unsafe.Pointer(&c)) +ch := *(*C.int64_t)(unsafe.Pointer(&h)) +cw := *(*C.int64_t)(unsafe.Pointer(&w)) +C.atg_cudnn_affine_grid_generator(ptr, theta, cn, cc, ch, cw) +} +func AtgCudnnAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, n int64, c int64, h int64, w int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cc := *(*C.int64_t)(unsafe.Pointer(&c)) +ch := *(*C.int64_t)(unsafe.Pointer(&h)) +cw := *(*C.int64_t)(unsafe.Pointer(&w)) +C.atg_cudnn_affine_grid_generator_backward(ptr, grad, cn, cc, ch, cw) +} +func AtgCudnnBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) +cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) +C.atg_cudnn_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) +} +func AtgCudnnBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64, reserveSpace Ctensor){ +cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) +C.atg_cudnn_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon, reserveSpace) +} +func AtgCudnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolution1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := 
*(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups 
:= *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution_transpose(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTranspose1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution_transpose1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) 
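// Annotation (not part of the generated diff): ptr follows the libtch out-parameter convention; the C call below writes the handle(s) of the newly allocated result tensor(s) through it instead of returning a value, which is why these wrappers have no return type.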
+C.atg_cudnn_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor){ +C.atg_cudnn_grid_sampler(ptr, self, grid) +} +func AtgCudnnGridSamplerBackward(ptr *Ctensor, self Ctensor, grid Ctensor, gradOutput Ctensor){ +C.atg_cudnn_grid_sampler_backward(ptr, self, grid, gradOutput) +} +func AtgCummax(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummax(ptr, self, cdim) +} +func AtgCummaxOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummax_out(ptr, values, indices, self, cdim) +} +func AtgCummin(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummin(ptr, self, cdim) +} +func AtgCumminOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummin_out(ptr, values, indices, self, cdim) +} +func AtgCumprod(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumprod(ptr, self, cdim, cdtype) +} +func AtgCumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumprod_out(ptr, out, self, cdim, cdtype) +} +func AtgCumsum(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumsum(ptr, self, cdim, cdtype) +} +func AtgCumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumsum_out(ptr, out, self, cdim, cdtype) +} +func AtgData(ptr *Ctensor, self Ctensor){ +C.atg_data(ptr, self) +} +func AtgDequantize(ptr *Ctensor, self Ctensor){ +C.atg_dequantize(ptr, self) +} +func AtgDet(ptr *Ctensor, self Ctensor){ +C.atg_det(ptr, self) +} +func AtgDetach(ptr *Ctensor, self Ctensor){ +C.atg_detach(ptr, self) +} +func AtgDetach_(ptr *Ctensor, self Ctensor){ +C.atg_detach_(ptr, self) +} +func AtgDiag(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_diag(ptr, self, cdiagonal) +} +func AtgDiagEmbed(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) +C.atg_diag_embed(ptr, self, coffset, cdim1, cdim2) +} +func AtgDiagOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_diag_out(ptr, out, self, cdiagonal) +} +func AtgDiagflat(ptr *Ctensor, self Ctensor, offset int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +C.atg_diagflat(ptr, self, coffset) +} +func AtgDiagonal(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) +C.atg_diagonal(ptr, self, coffset, cdim1, cdim2) +} +func AtgDigamma(ptr *Ctensor, self Ctensor){ +C.atg_digamma(ptr, self) +} +func AtgDigamma_(ptr *Ctensor, self 
Ctensor){ +C.atg_digamma_(ptr, self) +} +func AtgDigammaOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_digamma_out(ptr, out, self) +} +func AtgDist(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_dist(ptr, self, other) +} +func AtgDiv(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_div(ptr, self, other) +} +func AtgDiv1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_div1(ptr, self, other ) +} +func AtgDiv_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_div_(ptr, self, other) +} +func AtgDiv1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_div_1(ptr, self, other ) +} +func AtgDivOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_div_out(ptr, out, self, other) +} +func AtgDot(ptr *Ctensor, self Ctensor, tensor Ctensor){ +C.atg_dot(ptr, self, tensor) +} +func AtgDotOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor Ctensor){ +C.atg_dot_out(ptr, out, self, tensor) +} +func AtgDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_dropout(ptr, input, cp, ctrain) +} +func AtgDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_dropout_(ptr, self, cp, ctrain) +} +func AtgEig(ptr *Ctensor, self Ctensor, eigenvectors int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +C.atg_eig(ptr, self, ceigenvectors) +} +func AtgEigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +C.atg_eig_out(ptr, e, v, self, ceigenvectors) +} +func AtgEinsum(ptr *Ctensor, equation string, tensorsData []Ctensor, tensorsLen int){ +cequation := C.CString(equation) +equationLen := len(equation) +cequationLen := *(*C.int)(unsafe.Pointer(&equationLen)) +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_einsum(ptr, cequation, cequationLen, ctensorsDataPtr, ctensorsLen) +} +func AtgElu(ptr *Ctensor, self Ctensor){ +C.atg_elu(ptr, self) +} +func AtgElu_(ptr *Ctensor, self Ctensor){ +C.atg_elu_(ptr, self) +} +func AtgEluBackward(ptr *Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor){ +C.atg_elu_backward(ptr, gradOutput, alpha , scale , inputScale , output) +} +func AtgEluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor){ +C.atg_elu_backward_out(ptr, gradInput, gradOutput, alpha , scale , inputScale , output) +} +func AtgEluOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_elu_out(ptr, out, self) +} +func AtgEmbedding(ptr *Ctensor, weight Ctensor, indices Ctensor, paddingIdx int64, scaleGradByFreq int32, sparse int32){ +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +C.atg_embedding(ptr, weight, indices, cpaddingIdx, cscaleGradByFreq, csparse) +} +func AtgEmbeddingBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32, sparse int32){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +C.atg_embedding_backward(ptr, grad, indices, cnumWeights, 
cpaddingIdx, cscaleGradByFreq, csparse) +} +func AtgEmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){ +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) +C.atg_embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) +} +func AtgEmbeddingDenseBackward(ptr *Ctensor, gradOutput Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +C.atg_embedding_dense_backward(ptr, gradOutput, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) +} +func AtgEmbeddingRenorm_(ptr *Ctensor, self Ctensor, indices Ctensor, maxNorm float64, normType float64){ +cmaxNorm := *(*C.double)(unsafe.Pointer(&maxNorm)) +cnormType := *(*C.double)(unsafe.Pointer(&normType)) +C.atg_embedding_renorm_(ptr, self, indices, cmaxNorm, cnormType) +} +func AtgEmbeddingSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +C.atg_embedding_sparse_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) +} +func AtgEmpty(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_empty(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgEmptyLike(ptr *Ctensor, self Ctensor){ +C.atg_empty_like(ptr, self) +} +func AtgEmptyOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_empty_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgEmptyStrided(ptr *Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_empty_strided(ptr, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, coptionsKind, coptionsDevice) +} +func AtgEq(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_eq(ptr, self, other ) +} +func AtgEq1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_eq1(ptr, self, other) +} +func AtgEq_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_eq_(ptr, self, other ) +} +func AtgEq1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_eq_1(ptr, self, other) +} +func AtgEqOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_eq_out(ptr, out, self, other ) +} +func AtgEqOut1(ptr *Ctensor, out Ctensor, 
self Ctensor, other Ctensor){ +C.atg_eq_out1(ptr, out, self, other) +} +func AtgErf(ptr *Ctensor, self Ctensor){ +C.atg_erf(ptr, self) +} +func AtgErf_(ptr *Ctensor, self Ctensor){ +C.atg_erf_(ptr, self) +} +func AtgErfOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_erf_out(ptr, out, self) +} +func AtgErfc(ptr *Ctensor, self Ctensor){ +C.atg_erfc(ptr, self) +} +func AtgErfc_(ptr *Ctensor, self Ctensor){ +C.atg_erfc_(ptr, self) +} +func AtgErfcOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_erfc_out(ptr, out, self) +} +func AtgErfinv(ptr *Ctensor, self Ctensor){ +C.atg_erfinv(ptr, self) +} +func AtgErfinv_(ptr *Ctensor, self Ctensor){ +C.atg_erfinv_(ptr, self) +} +func AtgErfinvOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_erfinv_out(ptr, out, self) +} +func AtgExp(ptr *Ctensor, self Ctensor){ +C.atg_exp(ptr, self) +} +func AtgExp_(ptr *Ctensor, self Ctensor){ +C.atg_exp_(ptr, self) +} +func AtgExpOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_exp_out(ptr, out, self) +} +func AtgExpand(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, implicit int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cimplicit := *(*C.int)(unsafe.Pointer(&implicit)) +C.atg_expand(ptr, self, csizeDataPtr, csizeLen, cimplicit) +} +func AtgExpandAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_expand_as(ptr, self, other) +} +func AtgExpm1(ptr *Ctensor, self Ctensor){ +C.atg_expm1(ptr, self) +} +func AtgExpm1_(ptr *Ctensor, self Ctensor){ +C.atg_expm1_(ptr, self) +} +func AtgExpm1Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_expm1_out(ptr, out, self) +} +func AtgExponential_(ptr *Ctensor, self Ctensor, lambd float64){ +clambd := *(*C.double)(unsafe.Pointer(&lambd)) +C.atg_exponential_(ptr, self, clambd) +} +func AtgEye(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_eye(ptr, cn, coptionsKind, coptionsDevice) +} +func AtgEye1(ptr *Ctensor, n int64, m int64, optionsKind int32, optionsDevice int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cm := *(*C.int64_t)(unsafe.Pointer(&m)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_eye1(ptr, cn, cm, coptionsKind, coptionsDevice) +} +func AtgEyeOut(ptr *Ctensor, out Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_eye_out(ptr, out, cn) +} +func AtgEyeOut1(ptr *Ctensor, out Ctensor, n int64, m int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cm := *(*C.int64_t)(unsafe.Pointer(&m)) +C.atg_eye_out1(ptr, out, cn, cm) +} +func AtgFakeQuantizePerChannelAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg_fake_quantize_per_channel_affine(ptr, self, scale, zeroPoint, caxis, cquantMin, cquantMax) +} +func AtgFakeQuantizePerChannelAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg_fake_quantize_per_channel_affine_backward(ptr, grad, self, 
scale, zeroPoint, caxis, cquantMin, cquantMax) +} +func AtgFakeQuantizePerTensorAffine(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg_fake_quantize_per_tensor_affine(ptr, self, cscale, czeroPoint, cquantMin, cquantMax) +} +func AtgFakeQuantizePerTensorAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg_fake_quantize_per_tensor_affine_backward(ptr, grad, self, cscale, czeroPoint, cquantMin, cquantMax) +} +func AtgFbgemmLinearFp16Weight(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){ +C.atg_fbgemm_linear_fp16_weight(ptr, input, packedWeight, bias) +} +func AtgFbgemmLinearFp16WeightFp32Activation(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){ +C.atg_fbgemm_linear_fp16_weight_fp32_activation(ptr, input, packedWeight, bias) +} +func AtgFbgemmLinearInt8Weight(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){ +C.atg_fbgemm_linear_int8_weight(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias) +} +func AtgFbgemmLinearInt8WeightFp32Activation(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){ +C.atg_fbgemm_linear_int8_weight_fp32_activation(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias) +} +func AtgFbgemmPackGemmMatrixFp16(ptr *Ctensor, input Ctensor){ +C.atg_fbgemm_pack_gemm_matrix_fp16(ptr, input) +} +func AtgFbgemmPackQuantizedMatrix(ptr *Ctensor, input Ctensor){ +C.atg_fbgemm_pack_quantized_matrix(ptr, input) +} +func AtgFbgemmPackQuantizedMatrix1(ptr *Ctensor, input Ctensor, k int64, n int64){ +ck := *(*C.int64_t)(unsafe.Pointer(&k)) +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_fbgemm_pack_quantized_matrix1(ptr, input, ck, cn) +} +func AtgFeatureAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_alpha_dropout(ptr, input, cp, ctrain) +} +func AtgFeatureAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_alpha_dropout_(ptr, self, cp, ctrain) +} +func AtgFeatureDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_dropout(ptr, input, cp, ctrain) +} +func AtgFeatureDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_dropout_(ptr, self, cp, ctrain) +} +func AtgFft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +C.atg_fft(ptr, self, csignalNdim, cnormalized) +} +func AtgFill_(ptr *Ctensor, self Ctensor, 
value Cscalar){ +C.atg_fill_(ptr, self, value ) +} +func AtgFill1_(ptr *Ctensor, self Ctensor, value Ctensor){ +C.atg_fill_1(ptr, self, value) +} +func AtgFillDiagonal_(ptr *Ctensor, self Ctensor, fillValue Cscalar, wrap int32){ +cwrap := *(*C.int)(unsafe.Pointer(&wrap)) +C.atg_fill_diagonal_(ptr, self, fillValue , cwrap) +} +func AtgFlatten(ptr *Ctensor, self Ctensor, startDim int64, endDim int64){ +cstartDim := *(*C.int64_t)(unsafe.Pointer(&startDim)) +cendDim := *(*C.int64_t)(unsafe.Pointer(&endDim)) +C.atg_flatten(ptr, self, cstartDim, cendDim) +} +func AtgFlip(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){ +cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) +cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) +C.atg_flip(ptr, self, cdimsDataPtr, cdimsLen) +} +func AtgFloor(ptr *Ctensor, self Ctensor){ +C.atg_floor(ptr, self) +} +func AtgFloor_(ptr *Ctensor, self Ctensor){ +C.atg_floor_(ptr, self) +} +func AtgFloorDivide(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_floor_divide(ptr, self, other) +} +func AtgFloorDivide1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_floor_divide1(ptr, self, other ) +} +func AtgFloorDivide_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_floor_divide_(ptr, self, other) +} +func AtgFloorDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_floor_divide_1(ptr, self, other ) +} +func AtgFloorDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_floor_divide_out(ptr, out, self, other) +} +func AtgFloorOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_floor_out(ptr, out, self) +} +func AtgFmod(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_fmod(ptr, self, other ) +} +func AtgFmod1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_fmod1(ptr, self, other) +} +func AtgFmod_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_fmod_(ptr, self, other ) +} +func AtgFmod1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_fmod_1(ptr, self, other) +} +func AtgFmodOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_fmod_out(ptr, out, self, other ) +} +func AtgFmodOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_fmod_out1(ptr, out, self, other) +} +func AtgFrac(ptr *Ctensor, self Ctensor){ +C.atg_frac(ptr, self) +} +func AtgFrac_(ptr *Ctensor, self Ctensor){ +C.atg_frac_(ptr, self) +} +func AtgFracOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_frac_out(ptr, out, self) +} +func AtgFractionalMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, 
coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool2dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool3dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := 
*(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFrobeniusNorm(ptr *Ctensor, self Ctensor){ +C.atg_frobenius_norm(ptr, self) +} +func AtgFrobeniusNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_frobenius_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgFrobeniusNormOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_frobenius_norm_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgFromFile(ptr *Ctensor, filename string, shared int32, size int64, optionsKind int32, optionsDevice int32){ +cfilename := C.CString(filename) +filenameLen := len(filename) +cfilenameLen := *(*C.int)(unsafe.Pointer(&filenameLen)) +cshared := *(*C.int)(unsafe.Pointer(&shared)) +csize := *(*C.int64_t)(unsafe.Pointer(&size)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_from_file(ptr, cfilename, cfilenameLen, cshared, csize, coptionsKind, coptionsDevice) +} +func AtgFull(ptr *Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_full(ptr, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice) +} +func AtgFullLike(ptr *Ctensor, self Ctensor, fillValue Cscalar){ +C.atg_full_like(ptr, self, fillValue ) +} +func AtgFullOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_full_out(ptr, out, csizeDataPtr, csizeLen, fillValue ) +} +func AtgGather(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) +C.atg_gather(ptr, self, cdim, index, csparseGrad) +} +func AtgGatherOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) +C.atg_gather_out(ptr, out, self, cdim, index, csparseGrad) +} +func AtgGe(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ge(ptr, self, other ) +} +func AtgGe1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ge1(ptr, self, other) +} +func AtgGe_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ge_(ptr, self, other ) +} +func AtgGe1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ge_1(ptr, self, other) +} +func AtgGeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_ge_out(ptr, out, self, other ) +} +func AtgGeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_ge_out1(ptr, out, self, other) +} +func AtgGelu(ptr *Ctensor, self Ctensor){ +C.atg_gelu(ptr, self) +} +func AtgGeluBackward(ptr *Ctensor, grad Ctensor, self Ctensor){ +C.atg_gelu_backward(ptr, 
grad, self) +} +func AtgGeometric_(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg_geometric_(ptr, self, cp) +} +func AtgGeqrf(ptr *Ctensor, self Ctensor){ +C.atg_geqrf(ptr, self) +} +func AtgGeqrfOut(ptr *Ctensor, a Ctensor, tau Ctensor, self Ctensor){ +C.atg_geqrf_out(ptr, a, tau, self) +} +func AtgGer(ptr *Ctensor, self Ctensor, vec2 Ctensor){ +C.atg_ger(ptr, self, vec2) +} +func AtgGerOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor){ +C.atg_ger_out(ptr, out, self, vec2) +} +func AtgGlu(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu(ptr, self, cdim) +} +func AtgGluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu_backward(ptr, gradOutput, self, cdim) +} +func AtgGluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu_backward_out(ptr, gradInput, gradOutput, self, cdim) +} +func AtgGluOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu_out(ptr, out, self, cdim) +} +func AtgGrad(ptr *Ctensor, self Ctensor){ +C.atg_grad(ptr, self) +} +func AtgGridSampler(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler2d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_2d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler2dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_2d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler3d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_3d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler3dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_3d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGroupNorm(ptr *Ctensor, input Ctensor, numGroups int64, weight Ctensor, bias Ctensor, eps 
float64, cudnnEnabled int32){ +cnumGroups := *(*C.int64_t)(unsafe.Pointer(&numGroups)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) +C.atg_group_norm(ptr, input, cnumGroups, weight, bias, ceps, ccudnnEnabled) +} +func AtgGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +C.atg_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) +} +func AtgGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){ +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +C.atg_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) +} +func AtgGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){ +C.atg_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh) +} +func AtgGt(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_gt(ptr, self, other ) +} +func AtgGt1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_gt1(ptr, self, other) +} +func AtgGt_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_gt_(ptr, self, other ) +} +func AtgGt1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_gt_1(ptr, self, other) +} +func AtgGtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_gt_out(ptr, out, self, other ) +} +func AtgGtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_gt_out1(ptr, out, self, other) +} +func AtgHammingWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hamming_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgHammingWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hamming_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgHammingWindow2(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +calpha := *(*C.double)(unsafe.Pointer(&alpha)) 
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hamming_window2(ptr, cwindowLength, cperiodic, calpha, coptionsKind, coptionsDevice) +} +func AtgHammingWindow3(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, beta float64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +calpha := *(*C.double)(unsafe.Pointer(&alpha)) +cbeta := *(*C.double)(unsafe.Pointer(&beta)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hamming_window3(ptr, cwindowLength, cperiodic, calpha, cbeta, coptionsKind, coptionsDevice) +} +func AtgHannWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hann_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgHannWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hann_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgHardshrink(ptr *Ctensor, self Ctensor){ +C.atg_hardshrink(ptr, self) +} +func AtgHardshrinkBackward(ptr *Ctensor, gradOut Ctensor, self Ctensor, lambd Cscalar){ +C.atg_hardshrink_backward(ptr, gradOut, self, lambd ) +} +func AtgHardsigmoid(ptr *Ctensor, self Ctensor){ +C.atg_hardsigmoid(ptr, self) +} +func AtgHardsigmoid_(ptr *Ctensor, self Ctensor){ +C.atg_hardsigmoid_(ptr, self) +} +func AtgHardsigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_hardsigmoid_backward(ptr, gradOutput, self) +} +func AtgHardsigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_hardsigmoid_out(ptr, out, self) +} +func AtgHardtanh(ptr *Ctensor, self Ctensor){ +C.atg_hardtanh(ptr, self) +} +func AtgHardtanh_(ptr *Ctensor, self Ctensor){ +C.atg_hardtanh_(ptr, self) +} +func AtgHardtanhBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){ +C.atg_hardtanh_backward(ptr, gradOutput, self, minVal , maxVal ) +} +func AtgHardtanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){ +C.atg_hardtanh_backward_out(ptr, gradInput, gradOutput, self, minVal , maxVal ) +} +func AtgHardtanhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_hardtanh_out(ptr, out, self) +} +func AtgHingeEmbeddingLoss(ptr *Ctensor, self Ctensor, target Ctensor, margin float64, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_hinge_embedding_loss(ptr, self, target, cmargin, creduction) +} +func AtgHistc(ptr *Ctensor, self Ctensor, bins int64){ +cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) +C.atg_histc(ptr, self, cbins) +} +func AtgHistcOut(ptr *Ctensor, out Ctensor, self Ctensor, bins int64){ +cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) +C.atg_histc_out(ptr, out, self, cbins) +} +func AtgHspmm(ptr *Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_hspmm(ptr, mat1, mat2) +} +func AtgHspmmOut(ptr *Ctensor, out 
Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_hspmm_out(ptr, out, mat1, mat2) +} +func AtgIfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +C.atg_ifft(ptr, self, csignalNdim, cnormalized) +} +func AtgIm2col(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colBackward(ptr *Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col_backward(ptr, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col_backward_out(ptr, gradInput, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, 
strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgImag(ptr *Ctensor, self Ctensor){ +C.atg_imag(ptr, self) +} +func AtgIndex(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +C.atg_index(ptr, self, cindicesDataPtr, cindicesLen) +} +func AtgIndexAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_add(ptr, self, cdim, index, source) +} +func AtgIndexAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_add_(ptr, self, cdim, index, source) +} +func AtgIndexCopy(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_copy(ptr, self, cdim, index, source) +} +func AtgIndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_copy_(ptr, self, cdim, index, source) +} +func AtgIndexFill(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill(ptr, self, cdim, index, value ) +} +func AtgIndexFill1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill1(ptr, self, cdim, index, value) +} +func AtgIndexFill_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill_(ptr, self, cdim, index, value ) +} +func AtgIndexFill1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill_1(ptr, self, cdim, index, value) +} +func AtgIndexPut(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) +C.atg_index_put(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) +} +func AtgIndexPut_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) +C.atg_index_put_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) +} +func AtgIndexSelect(ptr *Ctensor, self Ctensor, dim int64, index Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_select(ptr, self, cdim, index) +} +func AtgIndexSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor){ +cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_select_out(ptr, out, self, cdim, index) +} +func AtgIndices(ptr *Ctensor, self Ctensor){ +C.atg_indices(ptr, self) +} +func AtgInstanceNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, useInputStats int32, momentum float64, eps float64, cudnnEnabled int32){ +cuseInputStats := *(*C.int)(unsafe.Pointer(&useInputStats)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) +C.atg_instance_norm(ptr, input, weight, bias, runningMean, runningVar, cuseInputStats, cmomentum, ceps, ccudnnEnabled) +} +func AtgIntRepr(ptr *Ctensor, self Ctensor){ +C.atg_int_repr(ptr, self) +} +func AtgInverse(ptr *Ctensor, self Ctensor){ +C.atg_inverse(ptr, self) +} +func AtgInverseOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_inverse_out(ptr, out, self) +} +func AtgIrfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32, signalSizesData []int64, signalSizesLen int){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +csignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&signalSizesData[0])) +csignalSizesLen := *(*C.int)(unsafe.Pointer(&signalSizesLen)) +C.atg_irfft(ptr, self, csignalNdim, cnormalized, conesided, csignalSizesDataPtr, csignalSizesLen) +} +func AtgIsclose(ptr *Ctensor, self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32){ +crtol := *(*C.double)(unsafe.Pointer(&rtol)) +catol := *(*C.double)(unsafe.Pointer(&atol)) +cequalNan := *(*C.int)(unsafe.Pointer(&equalNan)) +C.atg_isclose(ptr, self, other, crtol, catol, cequalNan) +} +func AtgIsfinite(ptr *Ctensor, self Ctensor){ +C.atg_isfinite(ptr, self) +} +func AtgIsinf(ptr *Ctensor, self Ctensor){ +C.atg_isinf(ptr, self) +} +func AtgIsnan(ptr *Ctensor, self Ctensor){ +C.atg_isnan(ptr, self) +} +func AtgKlDiv(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_kl_div(ptr, self, target, creduction) +} +func AtgKlDivBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_kl_div_backward(ptr, gradOutput, self, target, creduction) +} +func AtgKthvalue(ptr *Ctensor, self Ctensor, k int64, dim int64, keepdim int32){ +ck := *(*C.int64_t)(unsafe.Pointer(&k)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_kthvalue(ptr, self, ck, cdim, ckeepdim) +} +func AtgKthvalueOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, keepdim int32){ +ck := *(*C.int64_t)(unsafe.Pointer(&k)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_kthvalue_out(ptr, values, indices, self, ck, cdim, ckeepdim) +} +func AtgL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss(ptr, self, target, creduction) +} +func AtgL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target 
Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss_out(ptr, out, self, target, creduction) +} +func AtgLayerNorm(ptr *Ctensor, input Ctensor, normalizedShapeData []int64, normalizedShapeLen int, weight Ctensor, bias Ctensor, eps float64, cudnnEnable int32){ +cnormalizedShapeDataPtr := (*C.int64_t)(unsafe.Pointer(&normalizedShapeData[0])) +cnormalizedShapeLen := *(*C.int)(unsafe.Pointer(&normalizedShapeLen)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable)) +C.atg_layer_norm(ptr, input, cnormalizedShapeDataPtr, cnormalizedShapeLen, weight, bias, ceps, ccudnnEnable) +} +func AtgLe(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_le(ptr, self, other ) +} +func AtgLe1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_le1(ptr, self, other) +} +func AtgLe_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_le_(ptr, self, other ) +} +func AtgLe1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_le_1(ptr, self, other) +} +func AtgLeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_le_out(ptr, out, self, other ) +} +func AtgLeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_le_out1(ptr, out, self, other) +} +func AtgLeakyRelu(ptr *Ctensor, self Ctensor){ +C.atg_leaky_relu(ptr, self) +} +func AtgLeakyRelu_(ptr *Ctensor, self Ctensor){ +C.atg_leaky_relu_(ptr, self) +} +func AtgLeakyReluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, negativeSlope Cscalar, selfIsResult int32){ +cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) +C.atg_leaky_relu_backward(ptr, gradOutput, self, negativeSlope , cselfIsResult) +} +func AtgLeakyReluOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_leaky_relu_out(ptr, out, self) +} +func AtgLerp(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){ +C.atg_lerp(ptr, self, end, weight ) +} +func AtgLerp1(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){ +C.atg_lerp1(ptr, self, end, weight) +} +func AtgLerp_(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){ +C.atg_lerp_(ptr, self, end, weight ) +} +func AtgLerp1_(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){ +C.atg_lerp_1(ptr, self, end, weight) +} +func AtgLerpOut(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Cscalar){ +C.atg_lerp_out(ptr, out, self, end, weight ) +} +func AtgLerpOut1(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Ctensor){ +C.atg_lerp_out1(ptr, out, self, end, weight) +} +func AtgLgamma(ptr *Ctensor, self Ctensor){ +C.atg_lgamma(ptr, self) +} +func AtgLgamma_(ptr *Ctensor, self Ctensor){ +C.atg_lgamma_(ptr, self) +} +func AtgLgammaOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_lgamma_out(ptr, out, self) +} +func AtgLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ +C.atg_linear(ptr, input, weight, bias) +} +func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, optionsKind int32, optionsDevice int32){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_linspace(ptr, start , end , csteps, coptionsKind, coptionsDevice) +} +func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, 
steps int64){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) +C.atg_linspace_out(ptr, out, start , end , csteps) +} +func AtgLog(ptr *Ctensor, self Ctensor){ +C.atg_log(ptr, self) +} +func AtgLog10(ptr *Ctensor, self Ctensor){ +C.atg_log10(ptr, self) +} +func AtgLog10_(ptr *Ctensor, self Ctensor){ +C.atg_log10_(ptr, self) +} +func AtgLog10Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log10_out(ptr, out, self) +} +func AtgLog1p(ptr *Ctensor, self Ctensor){ +C.atg_log1p(ptr, self) +} +func AtgLog1p_(ptr *Ctensor, self Ctensor){ +C.atg_log1p_(ptr, self) +} +func AtgLog1pOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log1p_out(ptr, out, self) +} +func AtgLog2(ptr *Ctensor, self Ctensor){ +C.atg_log2(ptr, self) +} +func AtgLog2_(ptr *Ctensor, self Ctensor){ +C.atg_log2_(ptr, self) +} +func AtgLog2Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log2_out(ptr, out, self) +} +func AtgLog_(ptr *Ctensor, self Ctensor){ +C.atg_log_(ptr, self) +} +func AtgLogNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +cstd := *(*C.double)(unsafe.Pointer(&std)) +C.atg_log_normal_(ptr, self, cmean, cstd) +} +func AtgLogOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log_out(ptr, out, self) +} +func AtgLogSigmoid(ptr *Ctensor, self Ctensor){ +C.atg_log_sigmoid(ptr, self) +} +func AtgLogSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){ +C.atg_log_sigmoid_backward(ptr, gradOutput, self, buffer) +} +func AtgLogSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){ +C.atg_log_sigmoid_backward_out(ptr, gradInput, gradOutput, self, buffer) +} +func AtgLogSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log_sigmoid_out(ptr, out, self) +} +func AtgLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_log_softmax(ptr, self, cdim, cdtype) +} +func AtgLogdet(ptr *Ctensor, self Ctensor){ +C.atg_logdet(ptr, self) +} +func AtgLogicalAnd(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_and(ptr, self, other) +} +func AtgLogicalAnd_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_and_(ptr, self, other) +} +func AtgLogicalAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_and_out(ptr, out, self, other) +} +func AtgLogicalNot(ptr *Ctensor, self Ctensor){ +C.atg_logical_not(ptr, self) +} +func AtgLogicalNot_(ptr *Ctensor, self Ctensor){ +C.atg_logical_not_(ptr, self) +} +func AtgLogicalNotOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_logical_not_out(ptr, out, self) +} +func AtgLogicalOr(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_or(ptr, self, other) +} +func AtgLogicalOr_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_or_(ptr, self, other) +} +func AtgLogicalOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_or_out(ptr, out, self, other) +} +func AtgLogicalXor(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_xor(ptr, self, other) +} +func AtgLogicalXor_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_xor_(ptr, self, other) +} +func AtgLogicalXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_xor_out(ptr, out, self, other) +} +func AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, base float64, optionsKind int32, optionsDevice int32){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) 
+cbase := *(*C.double)(unsafe.Pointer(&base)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_logspace(ptr, start , end , csteps, cbase, coptionsKind, coptionsDevice) +} +func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64, base float64){ +csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) +cbase := *(*C.double)(unsafe.Pointer(&base)) +C.atg_logspace_out(ptr, out, start , end , csteps, cbase) +} +func AtgLogsumexp(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_logsumexp(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgLogsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_logsumexp_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ +chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) +chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +C.atg_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) +} +func AtgLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){ +chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) +chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +C.atg_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) +} +func AtgLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){ +chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) +chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) +C.atg_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh) +} +func AtgLstsq(ptr *Ctensor, self Ctensor, a Ctensor){ +C.atg_lstsq(ptr, self, a) +} +func AtgLstsqOut(ptr *Ctensor, x Ctensor, qr Ctensor, self Ctensor, a Ctensor){ +C.atg_lstsq_out(ptr, x, qr, self, a) +} +func AtgLt(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_lt(ptr, self, other ) +} +func AtgLt1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_lt1(ptr, self, other) 
+} +func AtgLt_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_lt_(ptr, self, other ) +} +func AtgLt1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_lt_1(ptr, self, other) +} +func AtgLtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_lt_out(ptr, out, self, other ) +} +func AtgLtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_lt_out1(ptr, out, self, other) +} +func AtgLuSolve(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ +C.atg_lu_solve(ptr, self, lUData, lUPivots) +} +func AtgLuSolveOut(ptr *Ctensor, out Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ +C.atg_lu_solve_out(ptr, out, self, lUData, lUPivots) +} +func AtgMarginRankingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_margin_ranking_loss(ptr, input1, input2, target, cmargin, creduction) +} +func AtgMaskedFill(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){ +C.atg_masked_fill(ptr, self, mask, value ) +} +func AtgMaskedFill1(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){ +C.atg_masked_fill1(ptr, self, mask, value) +} +func AtgMaskedFill_(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){ +C.atg_masked_fill_(ptr, self, mask, value ) +} +func AtgMaskedFill1_(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){ +C.atg_masked_fill_1(ptr, self, mask, value) +} +func AtgMaskedScatter(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){ +C.atg_masked_scatter(ptr, self, mask, source) +} +func AtgMaskedScatter_(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){ +C.atg_masked_scatter_(ptr, self, mask, source) +} +func AtgMaskedSelect(ptr *Ctensor, self Ctensor, mask Ctensor){ +C.atg_masked_select(ptr, self, mask) +} +func AtgMaskedSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, mask Ctensor){ +C.atg_masked_select_out(ptr, out, self, mask) +} +func AtgMatmul(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_matmul(ptr, self, other) +} +func AtgMatmulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_matmul_out(ptr, out, self, other) +} +func AtgMatrixPower(ptr *Ctensor, self Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_matrix_power(ptr, self, cn) +} +func AtgMatrixRank(ptr *Ctensor, self Ctensor, symmetric int32){ +csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) +C.atg_matrix_rank(ptr, self, csymmetric) +} +func AtgMatrixRank1(ptr *Ctensor, self Ctensor, tol float64, symmetric int32){ +ctol := *(*C.double)(unsafe.Pointer(&tol)) +csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) +C.atg_matrix_rank1(ptr, self, ctol, csymmetric) +} +func AtgMax(ptr *Ctensor, self Ctensor){ +C.atg_max(ptr, self) +} +func AtgMax1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_max1(ptr, self, other) +} +func AtgMax2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_max2(ptr, self, cdim, ckeepdim) +} +func AtgMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_max_out(ptr, out, self, other) +} +func AtgMaxOut1(ptr *Ctensor, max Ctensor, maxValues Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_max_out1(ptr, max, maxValues, self, cdim, ckeepdim) +} +func AtgMaxPool1d(ptr *Ctensor, self Ctensor, 
kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool1dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool1d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) 
+C.atg_max_pool2d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool2dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool2dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool3dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) 
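// Editorial note, not part of the generated patch: the *WithIndices bindings return two tensors
// (pooled values and the argmax indices). At this C layer the caller is expected to pass ptr
// pointing at enough consecutive Ctensor slots for the C side to fill in order. A sketch under
// that assumption, reusing the kernelSize/stride/padding/dilation/ceilMode values from the
// earlier sketch:
//
//    out := make([]Ctensor, 2) // after the call: out[0] = values, out[1] = indices
//    AtgMaxPool2dWithIndices(&out[0], self, kernelSize, len(kernelSize), stride, len(stride),
//        padding, len(padding), dilation, len(dilation), ceilMode)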
+C.atg_max_pool3d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool3dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxUnpool2d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool3d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, 
paddingData []int64, paddingLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_max_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgMean(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_mean(ptr, self, cdtype) +} +func AtgMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_mean1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgMeanOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_mean_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgMedian(ptr *Ctensor, self Ctensor){ +C.atg_median(ptr, self) +} +func AtgMedian1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_median1(ptr, self, cdim, ckeepdim) +} +func 
AtgMedianOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_median_out(ptr, values, indices, self, cdim, ckeepdim) +} + +func AtgMin(ptr *Ctensor, self Ctensor){ +C.atg_min(ptr, self) +} +func AtgMin1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_min1(ptr, self, other) +} +func AtgMin2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_min2(ptr, self, cdim, ckeepdim) +} +func AtgMinOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_min_out(ptr, out, self, other) +} +func AtgMinOut1(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_min_out1(ptr, min, minIndices, self, cdim, ckeepdim) +} +func AtgMinValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_min_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgMiopenBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) +cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) +C.atg_miopen_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) +} +func AtgMiopenBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64){ +cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) +C.atg_miopen_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon) +} +func AtgMiopenConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionBackwardBias(ptr *Ctensor, gradOutput Ctensor){ +C.atg_miopen_convolution_backward_bias(ptr, gradOutput) +} +func AtgMiopenConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, 
deterministic int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_transpose(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, 
deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_depthwise_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) 
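// Editorial note, not part of the generated patch: the MIOpen and MKL-DNN wrappers in this
// stretch pass their boolean options (training, benchmark, deterministic, biasDefined) as plain
// int32 values standing in for C booleans, reinterpreted rather than converted, so a caller
// would write, for example:
//
//    var benchmark, deterministic int32 = 0, 0 // both "false"
//
// Optional tensor arguments such as bias are, as far as I can tell, passed as a null/undefined
// Ctensor when absent.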
+cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_depthwise_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_depthwise_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){ +cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) +cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) +cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) +cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) +C.atg_miopen_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) +} +func AtgMkldnnAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_mkldnn_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMkldnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){ +cpaddingDataPtr 
:= (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_mkldnn_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgMkldnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) +C.atg_mkldnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) +} +func AtgMkldnnConvolutionBackwardWeights(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) +C.atg_mkldnn_convolution_backward_weights(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) +} +func AtgMkldnnLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ +C.atg_mkldnn_linear(ptr, input, weight, bias) +} +func AtgMkldnnMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := 
*(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_mkldnn_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMkldnnReorderConv2dWeight(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_mkldnn_reorder_conv2d_weight(ptr, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgMm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_mm(ptr, self, mat2) +} +func AtgMmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_mm_out(ptr, out, self, mat2) +} +func AtgMode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_mode(ptr, self, cdim, ckeepdim) +} +func AtgModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_mode_out(ptr, values, indices, self, cdim, ckeepdim) +} +func AtgMseLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss(ptr, self, target, creduction) +} +func AtgMseLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgMseLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgMseLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss_out(ptr, out, self, target, creduction) +} +func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_mul(ptr, self, other) +} +func AtgMul1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_mul1(ptr, self, other ) +} +func AtgMul_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_mul_(ptr, self, other) +} +func AtgMul1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_mul_1(ptr, self, other ) +} +func AtgMulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_mul_out(ptr, out, self, other) +} +func AtgMultiMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multi_margin_loss_backward(ptr, gradOutput, self, target, p , margin , weight, creduction) +} +func AtgMultiMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction 
int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multi_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, p , margin , weight, creduction) +} +func AtgMultilabelMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss(ptr, self, target, creduction) +} +func AtgMultilabelMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss_backward(ptr, gradOutput, self, target, creduction, isTarget) +} +func AtgMultilabelMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction, isTarget) +} +func AtgMultilabelMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss_out(ptr, out, self, target, creduction) +} +func AtgMultinomial(ptr *Ctensor, self Ctensor, numSamples int64, replacement int32){ +cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) +creplacement := *(*C.int)(unsafe.Pointer(&replacement)) +C.atg_multinomial(ptr, self, cnumSamples, creplacement) +} +func AtgMultinomialOut(ptr *Ctensor, out Ctensor, self Ctensor, numSamples int64, replacement int32){ +cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) +creplacement := *(*C.int)(unsafe.Pointer(&replacement)) +C.atg_multinomial_out(ptr, out, self, cnumSamples, creplacement) +} +func AtgMv(ptr *Ctensor, self Ctensor, vec Ctensor){ +C.atg_mv(ptr, self, vec) +} +func AtgMvOut(ptr *Ctensor, out Ctensor, self Ctensor, vec Ctensor){ +C.atg_mv_out(ptr, out, self, vec) +} +func AtgMvlgamma(ptr *Ctensor, self Ctensor, p int64){ +cp := *(*C.int64_t)(unsafe.Pointer(&p)) +C.atg_mvlgamma(ptr, self, cp) +} +func AtgMvlgamma_(ptr *Ctensor, self Ctensor, p int64){ +cp := *(*C.int64_t)(unsafe.Pointer(&p)) +C.atg_mvlgamma_(ptr, self, cp) +} +func AtgNarrow(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstart := *(*C.int64_t)(unsafe.Pointer(&start)) +clength := *(*C.int64_t)(unsafe.Pointer(&length)) +C.atg_narrow(ptr, self, cdim, cstart, clength) +} +func AtgNarrow1(ptr *Ctensor, self Ctensor, dim int64, start Ctensor, length int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +clength := *(*C.int64_t)(unsafe.Pointer(&length)) +C.atg_narrow1(ptr, self, cdim, start, clength) +} +func AtgNarrowCopy(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstart := *(*C.int64_t)(unsafe.Pointer(&start)) +clength := *(*C.int64_t)(unsafe.Pointer(&length)) +C.atg_narrow_copy(ptr, self, cdim, cstart, clength) +} +func AtgNativeBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_native_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) +} +func AtgNativeBatchNormOut(ptr *Ctensor, out Ctensor, saveMean 
Ctensor, saveInvstd Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_native_batch_norm_out(ptr, out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) +} +func AtgNativeLayerNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, m int64, n int64, eps float64){ +cm := *(*C.int64_t)(unsafe.Pointer(&m)) +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_native_layer_norm(ptr, input, weight, bias, cm, cn, ceps) +} +func AtgNativeNorm(ptr *Ctensor, self Ctensor){ +C.atg_native_norm(ptr, self) +} +func AtgNe(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ne(ptr, self, other ) +} +func AtgNe1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ne1(ptr, self, other) +} +func AtgNe_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ne_(ptr, self, other ) +} +func AtgNe1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ne_1(ptr, self, other) +} +func AtgNeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_ne_out(ptr, out, self, other ) +} +func AtgNeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_ne_out1(ptr, out, self, other) +} +func AtgNeg(ptr *Ctensor, self Ctensor){ +C.atg_neg(ptr, self) +} +func AtgNeg_(ptr *Ctensor, self Ctensor){ +C.atg_neg_(ptr, self) +} +func AtgNegOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_neg_out(ptr, out, self) +} +func AtgNewEmpty(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_new_empty(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgNewFull(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_new_full(ptr, self, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice) +} +func AtgNewZeros(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_new_zeros(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgNllLoss(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss(ptr, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLoss2d(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d(ptr, self, target, 
weight, creduction, cignoreIndex) +} +func AtgNllLoss2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLoss2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLoss2dOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d_out(ptr, out, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss_out(ptr, out, self, target, weight, creduction, cignoreIndex) +} +func AtgNonzero(ptr *Ctensor, self Ctensor){ +C.atg_nonzero(ptr, self) +} + +func AtgNonzeroOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_nonzero_out(ptr, out, self) +} +func AtgNorm(ptr *Ctensor, self Ctensor){ +C.atg_norm(ptr, self) +} +func AtgNorm1(ptr *Ctensor, self Ctensor, p Cscalar, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_norm1(ptr, self, p , cdtype) +} +func AtgNorm2(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_norm2(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNorm3(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_norm3(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNormExceptDim(ptr *Ctensor, v Ctensor, pow int64, dim int64){ +cpow := *(*C.int64_t)(unsafe.Pointer(&pow)) +cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_norm_except_dim(ptr, v, cpow, cdim) +} +func AtgNormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_norm_out(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_norm_out1(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +cstd := *(*C.double)(unsafe.Pointer(&std)) +C.atg_normal_(ptr, self, cmean, cstd) +} +func AtgNormalOut(ptr *Ctensor, out Ctensor, mean Ctensor, std float64){ +cstd := *(*C.double)(unsafe.Pointer(&std)) +C.atg_normal_out(ptr, out, mean, cstd) +} +func AtgNormalOut1(ptr *Ctensor, out Ctensor, mean float64, std Ctensor){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +C.atg_normal_out1(ptr, out, cmean, std) +} +func AtgNormalOut2(ptr *Ctensor, out Ctensor, mean Ctensor, std Ctensor){ +C.atg_normal_out2(ptr, out, mean, std) +} +func AtgNormalOut3(ptr *Ctensor, out Ctensor, mean float64, std float64, sizeData []int64, sizeLen int){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +cstd := *(*C.double)(unsafe.Pointer(&std)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_normal_out3(ptr, out, cmean, cstd, csizeDataPtr, csizeLen) +} +func AtgNuclearNorm(ptr *Ctensor, self Ctensor, keepdim int32){ +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm(ptr, self, ckeepdim) +} +func AtgNuclearNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNuclearNormOut(ptr *Ctensor, out Ctensor, self Ctensor, keepdim int32){ +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm_out(ptr, out, self, ckeepdim) +} +func AtgNuclearNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm_out1(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNumpyT(ptr *Ctensor, self Ctensor){ +C.atg_numpy_t(ptr, self) +} +func AtgOneHot(ptr *Ctensor, self Ctensor, numClasses int64){ +cnumClasses := *(*C.int64_t)(unsafe.Pointer(&numClasses)) +C.atg_one_hot(ptr, self, cnumClasses) +} +func AtgOnes(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_ones(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgOnesLike(ptr *Ctensor, self Ctensor){ 
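// Editorial note, not part of the generated patch: the loss bindings above (AtgMseLoss,
// AtgNllLoss, AtgMultilabelMarginLoss, ...) take reduction as a raw int64 matching libtorch's
// Reduction enum (to the best of my knowledge 0 = none, 1 = mean, 2 = sum), and the NLL-loss
// variants additionally take an ignoreIndex (conventionally -100 for "no ignored class") plus an
// optional per-class weight tensor, where a null/undefined Ctensor means "unweighted". A
// hypothetical call, assuming logProbs and target are existing Ctensors and nullWeight is a
// null Ctensor:
//
//    reduction := int64(1) // mean
//    ignoreIndex := int64(-100)
//    AtgNllLoss(ptr, logProbs, target, nullWeight, reduction, ignoreIndex)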
+C.atg_ones_like(ptr, self) +} +func AtgOnesOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_ones_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgOrgqr(ptr *Ctensor, self Ctensor, input2 Ctensor){ +C.atg_orgqr(ptr, self, input2) +} +func AtgOrgqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor){ +C.atg_orgqr_out(ptr, out, self, input2) +} +func AtgOrmqr(ptr *Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){ +cleft := *(*C.int)(unsafe.Pointer(&left)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +C.atg_ormqr(ptr, self, input2, input3, cleft, ctranspose) +} +func AtgOrmqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){ +cleft := *(*C.int)(unsafe.Pointer(&left)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +C.atg_ormqr_out(ptr, out, self, input2, input3, cleft, ctranspose) +} +func AtgPairwiseDistance(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, eps float64, keepdim int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_pairwise_distance(ptr, x1, x2, cp, ceps, ckeepdim) +} +func AtgPdist(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg_pdist(ptr, self, cp) +} +func AtgPermute(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){ +cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) +cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) +C.atg_permute(ptr, self, cdimsDataPtr, cdimsLen) +} +func AtgPinMemory(ptr *Ctensor, self Ctensor){ +C.atg_pin_memory(ptr, self) +} +func AtgPinverse(ptr *Ctensor, self Ctensor, rcond float64){ +crcond := *(*C.double)(unsafe.Pointer(&rcond)) +C.atg_pinverse(ptr, self, crcond) +} +func AtgPixelShuffle(ptr *Ctensor, self Ctensor, upscaleFactor int64){ +cupscaleFactor := *(*C.int64_t)(unsafe.Pointer(&upscaleFactor)) +C.atg_pixel_shuffle(ptr, self, cupscaleFactor) +} +func AtgPoisson(ptr *Ctensor, self Ctensor){ +C.atg_poisson(ptr, self) +} +func AtgPoissonNllLoss(ptr *Ctensor, input Ctensor, target Ctensor, logInput int32, full int32, eps float64, reduction int64){ +clogInput := *(*C.int)(unsafe.Pointer(&logInput)) +cfull := *(*C.int)(unsafe.Pointer(&full)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_poisson_nll_loss(ptr, input, target, clogInput, cfull, ceps, creduction) +} +func AtgPolygamma(ptr *Ctensor, n int64, self Ctensor){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_polygamma(ptr, cn, self) +} +func AtgPolygamma_(ptr *Ctensor, self Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_polygamma_(ptr, self, cn) +} +func AtgPolygammaOut(ptr *Ctensor, out Ctensor, n int64, self Ctensor){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_polygamma_out(ptr, out, cn, self) +} +func AtgPow(ptr *Ctensor, self Ctensor, exponent Cscalar){ +C.atg_pow(ptr, self, exponent ) +} +func AtgPow1(ptr *Ctensor, self Ctensor, exponent Ctensor){ +C.atg_pow1(ptr, self, exponent) +} +func AtgPow2(ptr *Ctensor, selfScalar Cscalar, exponent Ctensor){ +C.atg_pow2(ptr, selfScalar , exponent) +} +func AtgPow_(ptr *Ctensor, self Ctensor, exponent Cscalar){ +C.atg_pow_(ptr, self, exponent ) +} +func AtgPow1_(ptr *Ctensor, self Ctensor, exponent Ctensor){ +C.atg_pow_1(ptr, self, exponent) +} +func AtgPowOut(ptr *Ctensor, out 
Ctensor, self Ctensor, exponent Cscalar){ +C.atg_pow_out(ptr, out, self, exponent ) +} +func AtgPowOut1(ptr *Ctensor, out Ctensor, self Ctensor, exponent Ctensor){ +C.atg_pow_out1(ptr, out, self, exponent) +} +func AtgPowOut2(ptr *Ctensor, out Ctensor, selfScalar Cscalar, exponent Ctensor){ +C.atg_pow_out2(ptr, out, selfScalar , exponent) +} +func AtgPrelu(ptr *Ctensor, self Ctensor, weight Ctensor){ +C.atg_prelu(ptr, self, weight) +} +func AtgPreluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor){ +C.atg_prelu_backward(ptr, gradOutput, self, weight) +} +func AtgProd(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_prod(ptr, self, cdtype) +} +func AtgProd1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_prod1(ptr, self, cdim, ckeepdim, cdtype) +} +func AtgProdOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_prod_out(ptr, out, self, cdim, ckeepdim, cdtype) +} +func AtgPut_(ptr *Ctensor, self Ctensor, index Ctensor, source Ctensor, accumulate int32){ +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) +C.atg_put_(ptr, self, index, source, caccumulate) +} +func AtgQPerChannelScales(ptr *Ctensor, self Ctensor){ +C.atg_q_per_channel_scales(ptr, self) +} +func AtgQPerChannelZeroPoints(ptr *Ctensor, self Ctensor){ +C.atg_q_per_channel_zero_points(ptr, self) +} +func AtgQr(ptr *Ctensor, self Ctensor, some int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +C.atg_qr(ptr, self, csome) +} +func AtgQrOut(ptr *Ctensor, q Ctensor, r Ctensor, self Ctensor, some int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +C.atg_qr_out(ptr, q, r, self, csome) +} +func AtgQuantizePerChannel(ptr *Ctensor, self Ctensor, scales Ctensor, zeroPoints Ctensor, axis int64, dtype int32){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_quantize_per_channel(ptr, self, scales, zeroPoints, caxis, cdtype) +} +func AtgQuantizePerTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, dtype int32){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_quantize_per_tensor(ptr, self, cscale, czeroPoint, cdtype) +} +func AtgQuantizedBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, vari Ctensor, eps float64, outputScale float64, outputZeroPoint int64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +coutputScale := *(*C.double)(unsafe.Pointer(&outputScale)) +coutputZeroPoint := *(*C.int64_t)(unsafe.Pointer(&outputZeroPoint)) +C.atg_quantized_batch_norm(ptr, input, weight, bias, mean, vari, ceps, coutputScale, coutputZeroPoint) +} +func AtgQuantizedGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := 
*(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +C.atg_quantized_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) +} +func AtgQuantizedGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){ +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +C.atg_quantized_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) +} +func AtgQuantizedGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +C.atg_quantized_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgQuantizedLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32, dtype int32, useDynamic int32){ +chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) +chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) +C.atg_quantized_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst, cdtype, cuseDynamic) +} +func AtgQuantizedLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, dtype int32, useDynamic int32){ +chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) +chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) +cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) +cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) +chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) +C.atg_quantized_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cdtype, cuseDynamic) +} +func AtgQuantizedLstmCell(ptr *Ctensor, 
input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) +chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) +C.atg_quantized_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgQuantizedMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_quantized_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgQuantizedRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +C.atg_quantized_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgQuantizedRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +C.atg_quantized_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgRand(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_rand(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandLike(ptr *Ctensor, self Ctensor){ +C.atg_rand_like(ptr, self) +} +func AtgRandOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_rand_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgRandint(ptr *Ctensor, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randint(ptr, chigh, csizeDataPtr, csizeLen, coptionsKind, 
coptionsDevice) +} +func AtgRandint1(ptr *Ctensor, low int64, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +clow := *(*C.int64_t)(unsafe.Pointer(&low)) +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randint1(ptr, clow, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandintLike(ptr *Ctensor, self Ctensor, high int64){ +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +C.atg_randint_like(ptr, self, chigh) +} +func AtgRandintLike1(ptr *Ctensor, self Ctensor, low int64, high int64){ +clow := *(*C.int64_t)(unsafe.Pointer(&low)) +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +C.atg_randint_like1(ptr, self, clow, chigh) +} +func AtgRandintOut(ptr *Ctensor, out Ctensor, high int64, sizeData []int64, sizeLen int){ +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_randint_out(ptr, out, chigh, csizeDataPtr, csizeLen) +} +func AtgRandintOut1(ptr *Ctensor, out Ctensor, low int64, high int64, sizeData []int64, sizeLen int){ +clow := *(*C.int64_t)(unsafe.Pointer(&low)) +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_randint_out1(ptr, out, clow, chigh, csizeDataPtr, csizeLen) +} +func AtgRandn(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randn(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandnLike(ptr *Ctensor, self Ctensor){ +C.atg_randn_like(ptr, self) +} +func AtgRandnOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_randn_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgRandom_(ptr *Ctensor, self Ctensor){ +C.atg_random_(ptr, self) +} +func AtgRandom1_(ptr *Ctensor, self Ctensor, to int64){ +cto := *(*C.int64_t)(unsafe.Pointer(&to)) +C.atg_random_1(ptr, self, cto) +} +func AtgRandom2(ptr *Ctensor, self Ctensor, from int64, to int64){ +cfrom := *(*C.int64_t)(unsafe.Pointer(&from)) +cto := *(*C.int64_t)(unsafe.Pointer(&to)) +C.atg_random_2(ptr, self, cfrom, cto) +} +func AtgRandperm(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randperm(ptr, cn, coptionsKind, coptionsDevice) +} +func AtgRandpermOut(ptr *Ctensor, out Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_randperm_out(ptr, out, cn) +} +func AtgRange(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_range(ptr, start , end , coptionsKind, coptionsDevice) +} +func AtgRange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ 
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_range1(ptr, start , end , coptionsKind, coptionsDevice) +} +func AtgRangeOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){ +C.atg_range_out(ptr, out, start , end ) +} +func AtgReal(ptr *Ctensor, self Ctensor){ +C.atg_real(ptr, self) +} +func AtgReciprocal(ptr *Ctensor, self Ctensor){ +C.atg_reciprocal(ptr, self) +} +func AtgReciprocal_(ptr *Ctensor, self Ctensor){ +C.atg_reciprocal_(ptr, self) +} +func AtgReciprocalOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_reciprocal_out(ptr, out, self) +} +func AtgReflectionPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgRelu(ptr *Ctensor, self Ctensor){ +C.atg_relu(ptr, self) +} +func AtgRelu_(ptr *Ctensor, self Ctensor){ +C.atg_relu_(ptr, self) +} +func AtgRemainder(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_remainder(ptr, self, other ) +} +func AtgRemainder1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_remainder1(ptr, self, other) +} +func 
AtgRemainder_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_remainder_(ptr, self, other ) +} +func AtgRemainder1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_remainder_1(ptr, self, other) +} +func AtgRemainderOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_remainder_out(ptr, out, self, other ) +} +func AtgRemainderOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_remainder_out1(ptr, out, self, other) +} +func AtgRenorm(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_renorm(ptr, self, p , cdim, maxnorm ) +} +func AtgRenorm_(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_renorm_(ptr, self, p , cdim, maxnorm ) +} +func AtgRenormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_renorm_out(ptr, out, self, p , cdim, maxnorm ) +} +func AtgRepeat(ptr *Ctensor, self Ctensor, repeatsData []int64, repeatsLen int){ +crepeatsDataPtr := (*C.int64_t)(unsafe.Pointer(&repeatsData[0])) +crepeatsLen := *(*C.int)(unsafe.Pointer(&repeatsLen)) +C.atg_repeat(ptr, self, crepeatsDataPtr, crepeatsLen) +} +func AtgRepeatInterleave(ptr *Ctensor, repeats Ctensor){ +C.atg_repeat_interleave(ptr, repeats) +} +func AtgRepeatInterleave1(ptr *Ctensor, self Ctensor, repeats Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_repeat_interleave1(ptr, self, repeats, cdim) +} +func AtgRepeatInterleave2(ptr *Ctensor, self Ctensor, repeats int64, dim int64){ +crepeats := *(*C.int64_t)(unsafe.Pointer(&repeats)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_repeat_interleave2(ptr, self, crepeats, cdim) +} +func AtgReplicationPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) 
+cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgRequiresGrad_(ptr *Ctensor, self Ctensor, requiresGrad int32){ +crequiresGrad := *(*C.int)(unsafe.Pointer(&requiresGrad)) +C.atg_requires_grad_(ptr, self, crequiresGrad) +} +func AtgReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){ +cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) +cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) +C.atg_reshape(ptr, self, cshapeDataPtr, cshapeLen) +} +func AtgReshapeAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_reshape_as(ptr, self, other) +} +func AtgResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_resize_(ptr, self, csizeDataPtr, csizeLen) +} +func AtgResizeAs_(ptr *Ctensor, self Ctensor, theTemplate Ctensor){ +C.atg_resize_as_(ptr, self, theTemplate) +} +func AtgRfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +C.atg_rfft(ptr, self, csignalNdim, cnormalized, conesided) +} +func AtgRnnRelu(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){ +cparamsDataPtr := 
(*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+C.atg_rnn_relu(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
+}
+func AtgRnnRelu1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+C.atg_rnn_relu1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
+}
+func AtgRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
+C.atg_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
+}
+func AtgRnnTanh(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+C.atg_rnn_tanh(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
+}
+func AtgRnnTanh1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+C.atg_rnn_tanh1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
+}
+func AtgRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
+C.atg_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
+}
+func AtgRoll(ptr *Ctensor, self Ctensor, shiftsData []int64, shiftsLen int, dimsData []int64, dimsLen int){
+cshiftsDataPtr := (*C.int64_t)(unsafe.Pointer(&shiftsData[0]))
+cshiftsLen := *(*C.int)(unsafe.Pointer(&shiftsLen))
+cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0]))
+cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))
+C.atg_roll(ptr, self, cshiftsDataPtr, cshiftsLen, cdimsDataPtr, cdimsLen)
+}
+func AtgRot90(ptr *Ctensor, self Ctensor, k int64, dimsData 
[]int64, dimsLen int){ +ck := *(*C.int64_t)(unsafe.Pointer(&k)) +cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) +cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) +C.atg_rot90(ptr, self, ck, cdimsDataPtr, cdimsLen) +} +func AtgRound(ptr *Ctensor, self Ctensor){ +C.atg_round(ptr, self) +} +func AtgRound_(ptr *Ctensor, self Ctensor){ +C.atg_round_(ptr, self) +} +func AtgRoundOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_round_out(ptr, out, self) +} +func AtgRrelu(ptr *Ctensor, self Ctensor, training int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +C.atg_rrelu(ptr, self, ctraining) +} +func AtgRrelu_(ptr *Ctensor, self Ctensor, training int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +C.atg_rrelu_(ptr, self, ctraining) +} +func AtgRreluWithNoise(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +C.atg_rrelu_with_noise(ptr, self, noise, ctraining) +} +func AtgRreluWithNoise_(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +C.atg_rrelu_with_noise_(ptr, self, noise, ctraining) +} +func AtgRreluWithNoiseBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, noise Ctensor, lower Cscalar, upper Cscalar, training int32, selfIsResult int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) +C.atg_rrelu_with_noise_backward(ptr, gradOutput, self, noise, lower , upper , ctraining, cselfIsResult) +} +func AtgRreluWithNoiseOut(ptr *Ctensor, out Ctensor, self Ctensor, noise Ctensor, training int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +C.atg_rrelu_with_noise_out(ptr, out, self, noise, ctraining) +} +func AtgRsqrt(ptr *Ctensor, self Ctensor){ +C.atg_rsqrt(ptr, self) +} +func AtgRsqrt_(ptr *Ctensor, self Ctensor){ +C.atg_rsqrt_(ptr, self) +} +func AtgRsqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_rsqrt_out(ptr, out, self) +} +func AtgRsub(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_rsub(ptr, self, other) +} +func AtgRsub1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_rsub1(ptr, self, other ) +} +func AtgScalarTensor(ptr *Ctensor, s Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_scalar_tensor(ptr, s , coptionsKind, coptionsDevice) +} +func AtgScatter(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter(ptr, self, cdim, index, src) +} +func AtgScatter1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter1(ptr, self, cdim, index, value ) +} +func AtgScatter_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_(ptr, self, cdim, index, src) +} +func AtgScatter1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_1(ptr, self, cdim, index, value ) +} +func AtgScatterAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_add(ptr, self, cdim, index, src) +} +func AtgScatterAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_add_(ptr, self, cdim, index, src) +} +func 
AtgSelect(ptr *Ctensor, self Ctensor, dim int64, index int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cindex := *(*C.int64_t)(unsafe.Pointer(&index)) +C.atg_select(ptr, self, cdim, cindex) +} +func AtgSelu(ptr *Ctensor, self Ctensor){ +C.atg_selu(ptr, self) +} +func AtgSelu_(ptr *Ctensor, self Ctensor){ +C.atg_selu_(ptr, self) +} +func AtgSet_(ptr *Ctensor, self Ctensor){ +C.atg_set_(ptr, self) +} +func AtgSet1_(ptr *Ctensor, self Ctensor, source Ctensor){ +C.atg_set_1(ptr, self, source) +} +func AtgSetRequiresGrad(ptr *Ctensor, self Ctensor, r int32){ +cr := *(*C.int)(unsafe.Pointer(&r)) +C.atg_set_requires_grad(ptr, self, cr) +} +func AtgSigmoid(ptr *Ctensor, self Ctensor){ +C.atg_sigmoid(ptr, self) +} +func AtgSigmoid_(ptr *Ctensor, self Ctensor){ +C.atg_sigmoid_(ptr, self) +} +func AtgSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){ +C.atg_sigmoid_backward(ptr, gradOutput, output) +} +func AtgSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){ +C.atg_sigmoid_backward_out(ptr, gradInput, gradOutput, output) +} +func AtgSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sigmoid_out(ptr, out, self) +} +func AtgSign(ptr *Ctensor, self Ctensor){ +C.atg_sign(ptr, self) +} +func AtgSign_(ptr *Ctensor, self Ctensor){ +C.atg_sign_(ptr, self) +} +func AtgSignOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sign_out(ptr, out, self) +} +func AtgSin(ptr *Ctensor, self Ctensor){ +C.atg_sin(ptr, self) +} +func AtgSin_(ptr *Ctensor, self Ctensor){ +C.atg_sin_(ptr, self) +} +func AtgSinOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sin_out(ptr, out, self) +} +func AtgSinh(ptr *Ctensor, self Ctensor){ +C.atg_sinh(ptr, self) +} +func AtgSinh_(ptr *Ctensor, self Ctensor){ +C.atg_sinh_(ptr, self) +} +func AtgSinhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sinh_out(ptr, out, self) +} +func AtgSlice(ptr *Ctensor, self Ctensor, dim int64, start int64, end int64, step int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstart := *(*C.int64_t)(unsafe.Pointer(&start)) +cend := *(*C.int64_t)(unsafe.Pointer(&end)) +cstep := *(*C.int64_t)(unsafe.Pointer(&step)) +C.atg_slice(ptr, self, cdim, cstart, cend, cstep) +} +func AtgSlogdet(ptr *Ctensor, self Ctensor){ +C.atg_slogdet(ptr, self) +} +func AtgSlowConv3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_slow_conv3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgSlowConv3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := 
*(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_slow_conv3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgSlowConvDilated2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_dilated2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvDilated3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_dilated3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose2dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose2d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_smm(ptr, self, mat2) +} +func AtgSmoothL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_smooth_l1_loss(ptr, self, target, creduction) +} +func AtgSmoothL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, 
target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_smooth_l1_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgSmoothL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_smooth_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgSmoothL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_smooth_l1_loss_out(ptr, out, self, target, creduction) +} +func AtgSoftMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss(ptr, self, target, creduction) +} +func AtgSoftMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgSoftMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgSoftMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss_out(ptr, out, self, target, creduction) +} +func AtgSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_softmax(ptr, self, cdim, cdtype) +} +func AtgSoftplus(ptr *Ctensor, self Ctensor){ +C.atg_softplus(ptr, self) +} +func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ +C.atg_softplus_backward(ptr, gradOutput, self, beta , threshold , output) +} +func AtgSoftplusBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ +C.atg_softplus_backward_out(ptr, gradInput, gradOutput, self, beta , threshold , output) +} +func AtgSoftplusOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_softplus_out(ptr, out, self) +} +func AtgSoftshrink(ptr *Ctensor, self Ctensor){ +C.atg_softshrink(ptr, self) +} +func AtgSoftshrinkBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){ +C.atg_softshrink_backward(ptr, gradOutput, self, lambd ) +} +func AtgSoftshrinkBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){ +C.atg_softshrink_backward_out(ptr, gradInput, gradOutput, self, lambd ) +} +func AtgSoftshrinkOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_softshrink_out(ptr, out, self) +} +func AtgSolve(ptr *Ctensor, self Ctensor, a Ctensor){ +C.atg_solve(ptr, self, a) +} +func AtgSolveOut(ptr *Ctensor, solution Ctensor, lu Ctensor, self Ctensor, a Ctensor){ +C.atg_solve_out(ptr, solution, lu, self, a) +} +func AtgSort(ptr *Ctensor, self Ctensor, dim int64, descending int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdescending := *(*C.int)(unsafe.Pointer(&descending)) +C.atg_sort(ptr, self, cdim, cdescending) +} +func AtgSortOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, descending int32){ +cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) +cdescending := *(*C.int)(unsafe.Pointer(&descending)) +C.atg_sort_out(ptr, values, indices, self, cdim, cdescending) +} +func AtgSparseCooTensor(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_sparse_coo_tensor(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgSparseCooTensor1(ptr *Ctensor, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_sparse_coo_tensor1(ptr, indices, values, coptionsKind, coptionsDevice) +} +func AtgSparseCooTensor2(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_sparse_coo_tensor2(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgSparseMask(ptr *Ctensor, self Ctensor, mask Ctensor){ +C.atg_sparse_mask(ptr, self, mask) +} +func AtgSparseResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) +C.atg_sparse_resize_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) +} +func AtgSparseResizeAndClear_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) +C.atg_sparse_resize_and_clear_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) +} + + +func AtgSqrt(ptr *Ctensor, self Ctensor){ +C.atg_sqrt(ptr, self) +} +func AtgSqrt_(ptr *Ctensor, self Ctensor){ +C.atg_sqrt_(ptr, self) +} +func AtgSqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sqrt_out(ptr, out, self) +} +func AtgSquare(ptr *Ctensor, self Ctensor){ +C.atg_square(ptr, self) +} +func AtgSquare_(ptr *Ctensor, self Ctensor){ +C.atg_square_(ptr, self) +} +func AtgSqueeze(ptr *Ctensor, self Ctensor){ +C.atg_squeeze(ptr, self) +} +func AtgSqueeze1(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_squeeze1(ptr, self, cdim) +} +func AtgSqueeze_(ptr *Ctensor, self Ctensor){ +C.atg_squeeze_(ptr, self) +} +func AtgSqueeze1_(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_squeeze_1(ptr, self, cdim) +} +func AtgSspaddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_sspaddmm(ptr, self, mat1, mat2) +} +func AtgSspaddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_sspaddmm_out(ptr, out, self, mat1, mat2) +} +func AtgStack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) 
+ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_stack(ptr, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgStackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_stack_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgStd(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg_std(ptr, self, cunbiased) +} +func AtgStd1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_std1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStdMean(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg_std_mean(ptr, self, cunbiased) +} +func AtgStdMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_std_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStdOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_std_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStft(ptr *Ctensor, self Ctensor, nFft int64, hopLength int64, winLength int64, window Ctensor, normalized int32, onesided int32){ +cnFft := *(*C.int64_t)(unsafe.Pointer(&nFft)) +chopLength := *(*C.int64_t)(unsafe.Pointer(&hopLength)) +cwinLength := *(*C.int64_t)(unsafe.Pointer(&winLength)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +C.atg_stft(ptr, self, cnFft, chopLength, cwinLength, window, cnormalized, conesided) +} +func AtgSub(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_sub(ptr, self, other) +} +func AtgSub1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_sub1(ptr, self, other ) +} +func AtgSub_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_sub_(ptr, self, other) +} +func AtgSub1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_sub_1(ptr, self, other ) +} +func AtgSubOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_sub_out(ptr, out, self, other) +} +func AtgSum(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_sum(ptr, self, cdtype) +} +func AtgSum1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_sum1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgSumOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_sum_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgSumToSize(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_sum_to_size(ptr, self, csizeDataPtr, csizeLen) +} +func AtgSvd(ptr *Ctensor, self Ctensor, some int32, computeUv int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) +C.atg_svd(ptr, self, csome, ccomputeUv) +} +func AtgSvdOut(ptr *Ctensor, u Ctensor, s Ctensor, v Ctensor, self Ctensor, some int32, computeUv int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) +C.atg_svd_out(ptr, u, s, v, self, csome, ccomputeUv) +} +func AtgSymeig(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_symeig(ptr, self, ceigenvectors, cupper) +} +func AtgSymeigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32, upper int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_symeig_out(ptr, e, v, self, ceigenvectors, cupper) +} +func AtgT(ptr *Ctensor, self Ctensor){ +C.atg_t(ptr, self) +} +func AtgT_(ptr *Ctensor, self Ctensor){ +C.atg_t_(ptr, self) +} +func AtgTake(ptr *Ctensor, self Ctensor, index Ctensor){ +C.atg_take(ptr, self, index) +} +func AtgTakeOut(ptr *Ctensor, out Ctensor, self Ctensor, index Ctensor){ +C.atg_take_out(ptr, out, self, index) +} +func AtgTan(ptr *Ctensor, self Ctensor){ +C.atg_tan(ptr, self) +} +func AtgTan_(ptr *Ctensor, self Ctensor){ +C.atg_tan_(ptr, self) +} +func AtgTanOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_tan_out(ptr, out, self) +} +func AtgTanh(ptr *Ctensor, self Ctensor){ +C.atg_tanh(ptr, self) +} +func AtgTanh_(ptr *Ctensor, self Ctensor){ +C.atg_tanh_(ptr, self) +} +func AtgTanhBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){ +C.atg_tanh_backward(ptr, gradOutput, output) +} +func AtgTanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){ +C.atg_tanh_backward_out(ptr, gradInput, gradOutput, output) +} +func AtgTanhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_tanh_out(ptr, out, self) +} +func AtgTensordot(ptr *Ctensor, self Ctensor, other Ctensor, dimsSelfData []int64, dimsSelfLen int, dimsOtherData []int64, dimsOtherLen int){ +cdimsSelfDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsSelfData[0])) +cdimsSelfLen := *(*C.int)(unsafe.Pointer(&dimsSelfLen)) +cdimsOtherDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsOtherData[0])) +cdimsOtherLen := *(*C.int)(unsafe.Pointer(&dimsOtherLen)) +C.atg_tensordot(ptr, self, other, cdimsSelfDataPtr, cdimsSelfLen, cdimsOtherDataPtr, cdimsOtherLen) +} +func AtgThreshold(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){ +C.atg_threshold(ptr, self, threshold , value ) +} +func AtgThreshold_(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){ +C.atg_threshold_(ptr, self, threshold , value ) +} +func AtgThresholdBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, threshold Cscalar){ +C.atg_threshold_backward(ptr, gradOutput, self, threshold ) +} +func AtgThresholdOut(ptr *Ctensor, out Ctensor, self Ctensor, threshold 
Cscalar, value Cscalar){
+C.atg_threshold_out(ptr, out, self, threshold , value )
+}
+func AtgTo(ptr *Ctensor, self Ctensor, device int32){
+cdevice := *(*C.int)(unsafe.Pointer(&device))
+C.atg_to(ptr, self, cdevice)
+}
+func AtgTo1(ptr *Ctensor, self Ctensor, optionsKind int32, optionsDevice int32, nonBlocking int32, copy int32){
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to1(ptr, self, coptionsKind, coptionsDevice, cnonBlocking, ccopy)
+}
+func AtgTo2(ptr *Ctensor, self Ctensor, dtype int32, nonBlocking int32, copy int32){
+cdtype := *(*C.int)(unsafe.Pointer(&dtype))
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to2(ptr, self, cdtype, cnonBlocking, ccopy)
+}
+func AtgTo3(ptr *Ctensor, self Ctensor, other Ctensor, nonBlocking int32, copy int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to3(ptr, self, other, cnonBlocking, ccopy)
+}
+func AtgTo4(ptr *Ctensor, self Ctensor, device int32, dtype int32, nonBlocking int32, copy int32){
+cdevice := *(*C.int)(unsafe.Pointer(&device))
+cdtype := *(*C.int)(unsafe.Pointer(&dtype))
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to4(ptr, self, cdevice, cdtype, cnonBlocking, ccopy)
+}
+func AtgToDense(ptr *Ctensor, self Ctensor){
+C.atg_to_dense(ptr, self)
+}
+func AtgToDenseBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
+C.atg_to_dense_backward(ptr, grad, input)
+}
+func AtgToMkldnn(ptr *Ctensor, self Ctensor){
+C.atg_to_mkldnn(ptr, self)
+}
+func AtgToMkldnnBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
+C.atg_to_mkldnn_backward(ptr, grad, input)
+}
+func AtgToSparse(ptr *Ctensor, self Ctensor){
+C.atg_to_sparse(ptr, self)
+}
+func AtgToSparse1(ptr *Ctensor, self Ctensor, sparseDim int64){
+csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
+C.atg_to_sparse1(ptr, self, csparseDim)
+}
+func AtgTopk(ptr *Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
+ck := *(*C.int64_t)(unsafe.Pointer(&k))
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+clargest := *(*C.int)(unsafe.Pointer(&largest))
+csorted := *(*C.int)(unsafe.Pointer(&sorted))
+C.atg_topk(ptr, self, ck, cdim, clargest, csorted)
+}
+func AtgTopkOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
+ck := *(*C.int64_t)(unsafe.Pointer(&k))
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+clargest := *(*C.int)(unsafe.Pointer(&largest))
+csorted := *(*C.int)(unsafe.Pointer(&sorted))
+C.atg_topk_out(ptr, values, indices, self, ck, cdim, clargest, csorted)
+}
+func AtgTotype(ptr *Ctensor, self Ctensor, scalarType int32){
+cscalarType := *(*C.int)(unsafe.Pointer(&scalarType))
+C.atg_totype(ptr, self, cscalarType)
+}
+func AtgTrace(ptr *Ctensor, self Ctensor){
+C.atg_trace(ptr, self)
+}
+func AtgTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
+cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
+cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
+C.atg_transpose(ptr, self, cdim0, cdim1)
+}
+func AtgTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
+cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
+cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
+C.atg_transpose_(ptr, self, cdim0, cdim1)
+}
+func AtgTrapz(ptr *Ctensor, y Ctensor, x Ctensor, dim int64){ 
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_trapz(ptr, y, x, cdim) +} +func AtgTrapz1(ptr *Ctensor, y Ctensor, dx float64, dim int64){ +cdx := *(*C.double)(unsafe.Pointer(&dx)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_trapz1(ptr, y, cdx, cdim) +} +func AtgTriangularSolve(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) +C.atg_triangular_solve(ptr, self, a, cupper, ctranspose, cunitriangular) +} +func AtgTriangularSolveOut(ptr *Ctensor, x Ctensor, m Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) +C.atg_triangular_solve_out(ptr, x, m, self, a, cupper, ctranspose, cunitriangular) +} +func AtgTril(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_tril(ptr, self, cdiagonal) +} +func AtgTril_(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_tril_(ptr, self, cdiagonal) +} +func AtgTrilIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){ +crow := *(*C.int64_t)(unsafe.Pointer(&row)) +ccol := *(*C.int64_t)(unsafe.Pointer(&col)) +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_tril_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) +} +func AtgTrilOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_tril_out(ptr, out, self, cdiagonal) +} +func AtgTripletMarginLoss(ptr *Ctensor, anchor Ctensor, positive Ctensor, negative Ctensor, margin float64, p float64, eps float64, swap int32, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +cp := *(*C.double)(unsafe.Pointer(&p)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +cswap := *(*C.int)(unsafe.Pointer(&swap)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_triplet_margin_loss(ptr, anchor, positive, negative, cmargin, cp, ceps, cswap, creduction) +} +func AtgTriu(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_triu(ptr, self, cdiagonal) +} +func AtgTriu_(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_triu_(ptr, self, cdiagonal) +} +func AtgTriuIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){ +crow := *(*C.int64_t)(unsafe.Pointer(&row)) +ccol := *(*C.int64_t)(unsafe.Pointer(&col)) +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_triu_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) +} +func AtgTriuOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_triu_out(ptr, out, self, cdiagonal) +} +func AtgTrueDivide(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_true_divide(ptr, self, other) +} +func AtgTrueDivide1(ptr *Ctensor, self Ctensor, other Cscalar){ 
+C.atg_true_divide1(ptr, self, other ) +} +func AtgTrueDivide_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_true_divide_(ptr, self, other) +} +func AtgTrueDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_true_divide_1(ptr, self, other ) +} +func AtgTrueDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_true_divide_out(ptr, out, self, other) +} +func AtgTrunc(ptr *Ctensor, self Ctensor){ +C.atg_trunc(ptr, self) +} +func AtgTrunc_(ptr *Ctensor, self Ctensor){ +C.atg_trunc_(ptr, self) +} +func AtgTruncOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_trunc_out(ptr, out, self) +} +func AtgTypeAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_type_as(ptr, self, other) +} + +func AtgUnfold(ptr *Ctensor, self Ctensor, dimension int64, size int64, step int64){ +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +csize := *(*C.int64_t)(unsafe.Pointer(&size)) +cstep := *(*C.int64_t)(unsafe.Pointer(&step)) +C.atg_unfold(ptr, self, cdimension, csize, cstep) +} +func AtgUniform_(ptr *Ctensor, self Ctensor, from float64, to float64){ +cfrom := *(*C.double)(unsafe.Pointer(&from)) +cto := *(*C.double)(unsafe.Pointer(&to)) +C.atg_uniform_(ptr, self, cfrom, cto) +} +func AtgUniqueConsecutive(ptr *Ctensor, self Ctensor, returnInverse int32, returnCounts int32, dim int64){ +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_unique_consecutive(ptr, self, creturnInverse, creturnCounts, cdim) +} +func AtgUniqueDim(ptr *Ctensor, self Ctensor, dim int64, sorted int32, returnInverse int32, returnCounts int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csorted := *(*C.int)(unsafe.Pointer(&sorted)) +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +C.atg_unique_dim(ptr, self, cdim, csorted, creturnInverse, creturnCounts) +} +func AtgUniqueDimConsecutive(ptr *Ctensor, self Ctensor, dim int64, returnInverse int32, returnCounts int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +C.atg_unique_dim_consecutive(ptr, self, cdim, creturnInverse, creturnCounts) +} +func AtgUnsqueeze(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_unsqueeze(ptr, self, cdim) +} +func AtgUnsqueeze_(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_unsqueeze_(ptr, self, cdim) +} +func AtgUpsampleBicubic2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bicubic2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleBicubic2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bicubic2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleBicubic2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bicubic2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleBicubic2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bicubic2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleBilinear2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bilinear2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleBilinear2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bilinear2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleBilinear2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := 
*(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bilinear2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleBilinear2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_bilinear2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +} +func AtgUpsampleLinear1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_linear1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) +} +func AtgUpsampleLinear1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_linear1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) +} +func AtgUpsampleLinear1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_linear1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) +} +func AtgUpsampleLinear1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_linear1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) +} +func 
AtgUpsampleNearest1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_nearest1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscales) +} +func AtgUpsampleNearest1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_nearest1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) +} +func AtgUpsampleNearest1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_nearest1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) +} +func AtgUpsampleNearest1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscales := *(*C.double)(unsafe.Pointer(&scales)) +C.atg_upsample_nearest1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscales) +} +func AtgUpsampleNearest2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) +} +func AtgUpsampleNearest2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) +} +func AtgUpsampleNearest2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := 
*(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) +} +func AtgUpsampleNearest2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) +} +func AtgUpsampleNearest3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) +} +func AtgUpsampleNearest3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) +} +func AtgUpsampleNearest3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) +} +func AtgUpsampleNearest3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_nearest3d_out(ptr, 
out, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) +} +func AtgUpsampleTrilinear3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_trilinear3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +} +func AtgUpsampleTrilinear3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_trilinear3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +} +func AtgUpsampleTrilinear3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_trilinear3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +} +func AtgUpsampleTrilinear3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) +cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) +cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) +C.atg_upsample_trilinear3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +} +func AtgValues(ptr *Ctensor, self Ctensor){ +C.atg_values(ptr, self) +} +func AtgVar(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg_var(ptr, self, cunbiased) +} +func AtgVar1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dimData[0]))
+cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
+cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_var1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
+}
+func AtgVarMean(ptr *Ctensor, self Ctensor, unbiased int32){
+cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
+C.atg_var_mean(ptr, self, cunbiased)
+}
+func AtgVarMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
+cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
+cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
+cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_var_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
+}
+func AtgVarOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){
+cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
+cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
+cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_var_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim)
+}
+func AtgView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){
+csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
+csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
+C.atg_view(ptr, self, csizeDataPtr, csizeLen)
+}
+func AtgViewAs(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg_view_as(ptr, self, other)
+}
+
+func AtgWhere1(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){
+C.atg_where1(ptr, condition, self, other)
+}
+func AtgZero_(ptr *Ctensor, self Ctensor){
+C.atg_zero_(ptr, self)
+}
+func AtgZeros(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){
+csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
+csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_zeros(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
+}
+func AtgZerosLike(ptr *Ctensor, self Ctensor){
+C.atg_zeros_like(ptr, self)
+}
+func AtgZerosOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){
+csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
+csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
+C.atg_zeros_out(ptr, out, csizeDataPtr, csizeLen)
+}
diff --git a/nn/conv-transpose.go b/nn/conv-transpose.go
index 82cb31b..64e93b0 100644
--- a/nn/conv-transpose.go
+++ b/nn/conv-transpose.go
@@ -127,12 +127,12 @@ func NewConvTranspose3D(vs *Path, inDim, outDim int64, ksizes []int64, cfg ConvT
 // ============================================
 func (c ConvTranspose1D) Forward(xs ts.Tensor) ts.Tensor {
- return ts.MustConvTranspose1D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConvTranspose1d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Groups, c.Config.Dilation)
 }
 func (c ConvTranspose2D) Forward(xs ts.Tensor) ts.Tensor {
- return ts.MustConvTranspose2D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConvTranspose2d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Groups, c.Config.Dilation)
 }
 func (c ConvTranspose3D) Forward(xs ts.Tensor) ts.Tensor {
- return ts.MustConvTranspose3D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConvTranspose3d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.OutputPadding, c.Config.Groups, c.Config.Dilation)
 }
diff --git a/nn/conv.go b/nn/conv.go
index 3136212..5e5d93e 100644
--- a/nn/conv.go
+++ b/nn/conv.go
@@ -217,14 +217,14 @@ func NewConv(vs Path, inDim, outDim int64, ksizes []int64, config interface{}) C
 // ============================================
 func (c Conv1D) Forward(xs ts.Tensor) ts.Tensor {
- return ts.MustConv1D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConv1d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
 }
 func (c Conv2D) Forward(xs ts.Tensor) ts.Tensor {
- return ts.MustConv2D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConv2d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
 }
 func (c Conv3D) Forward(xs ts.Tensor) ts.Tensor {
- return ts.MustConv3D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConv3d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
 }
 // Implement ModuleT for Conv1D, Conv2D, Conv3D:
@@ -233,12 +233,12 @@ func (c Conv3D) Forward(xs ts.Tensor) ts.Tensor {
 // NOTE: `train` param won't be used, will be?
 func (c Conv1D) ForwardT(xs ts.Tensor, train bool) ts.Tensor {
- return ts.MustConv1D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConv1d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
 }
 func (c Conv2D) ForwardT(xs ts.Tensor, train bool) ts.Tensor {
- return ts.MustConv2D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConv2d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
 }
 func (c Conv3D) ForwardT(xs ts.Tensor, train bool) ts.Tensor {
- return ts.MustConv3D(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
+ return ts.MustConv3d(xs, c.Ws, c.Bs, c.Config.Stride, c.Config.Padding, c.Config.Dilation, c.Config.Groups)
 }
diff --git a/nn/init.go b/nn/init.go
index 37f3965..dc796a2 100644
--- a/nn/init.go
+++ b/nn/init.go
@@ -30,12 +30,12 @@ func NewConstInit(v float64) constInit {
 func (c constInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) {
 var err error
- kind := gotch.Float.CInt()
+ kind := gotch.Float
 switch {
 case c.value == 0.0:
- retVal = ts.MustZeros(dims, kind, device.CInt())
+ retVal = ts.MustZeros(dims, kind, device)
 case c.value == 1.0:
- retVal = ts.MustOnes(dims, kind, device.CInt())
+ retVal = ts.MustOnes(dims, kind, device)
 default:
 data := make([]float64, ts.FlattenDim(dims))
 for i := range data {
@@ -127,8 +127,8 @@ func NewUniformInit(lo, up float64) uniformInit {
 func (u uniformInit) InitTensor(dims []int64, device gotch.Device) (retVal ts.Tensor) {
 var err error
- kind := gotch.Float.CInt()
- retVal = ts.MustZeros(dims, kind, device.CInt())
+ kind := gotch.Float
+ retVal = ts.MustZeros(dims, kind, device)
 retVal.Uniform_(u.lo, u.up)
 if err != nil {
 log.Fatalf("uniformInit - InitTensor method call error: %v\n", err)
 }
@@ -158,8 +158,8 @@ func (k kaimingUniformInit) InitTensor(dims []int64, device gotch.Device) (retVa
 }
 bound := math.Sqrt(1.0 / float64(fanIn))
- kind := gotch.Float.CInt()
- retVal = ts.MustZeros(dims, kind, device.CInt())
+ kind := gotch.Float
+ retVal = ts.MustZeros(dims, kind, device)
 retVal.Uniform_(-bound, bound)
 return retVal
diff --git a/nn/linear.go b/nn/linear.go
index ee52cc3..90e0b83 100644
--- a/nn/linear.go
+++ b/nn/linear.go
@@ -43,7 +43,7 @@ func NewLinear(vs Path, inDim, outDim int64, c LinearConfig) Linear {
 // bs has size of output dimension
 switch c.Bias {
 case false:
- bs = ts.MustZeros([]int64{outDim}, gotch.Float.CInt(), vs.Device().CInt())
+ bs = ts.MustZeros([]int64{outDim}, gotch.Float, vs.Device())
 case true:
 switch {
 case c.BsInit == nil:
@@ -91,7 +91,7 @@ func NewLinear(vs Path, inDim, outDim int64, c LinearConfig) Linear {
 // 1 1 1 ]
 func (l Linear) Forward(xs ts.Tensor) (retVal ts.Tensor) {
- mul := xs.MustMatMul(l.Ws, false)
+ mul := xs.MustMatmul(l.Ws, false)
 return mul.MustAdd(l.Bs, true)
 }
@@ -100,6 +100,6 @@ func (l Linear) Forward(xs ts.Tensor) (retVal ts.Tensor) {
 // NOTE: train param will not be used.
 func (l Linear) ForwardT(xs ts.Tensor, train bool) (retVal ts.Tensor) {
- mul := xs.MustMatMul(l.Ws, false)
+ mul := xs.MustMatmul(l.Ws, false)
 return mul.MustAdd(l.Bs, true)
 }
diff --git a/nn/rnn.go b/nn/rnn.go
index 42de4b4..675ff2f 100644
--- a/nn/rnn.go
+++ b/nn/rnn.go
@@ -131,7 +131,7 @@ func (l LSTM) ZeroState(batchDim int64) (retVal State) {
 layerDim := l.config.NumLayers * numDirections
 shape := []int64{layerDim, batchDim, l.hiddenDim}
- zeros := ts.MustZeros(shape, gotch.Float.CInt(), l.device.CInt())
+ zeros := ts.MustZeros(shape, gotch.Float, l.device)
 return LSTMState{
 Tensor1: zeros.MustShallowClone(),
@@ -157,7 +157,7 @@ func (l LSTM) Seq(input ts.Tensor) (ts.Tensor, State) {
 func (l LSTM) SeqInit(input ts.Tensor, inState State) (ts.Tensor, State) {
- output, h, c := input.MustLSTM([]ts.Tensor{inState.(LSTMState).Tensor1, inState.(LSTMState).Tensor2}, l.flatWeights, l.config.HasBiases, l.config.NumLayers, l.config.Dropout, l.config.Train, l.config.Bidirectional, l.config.BatchFirst)
+ output, h, c := input.MustLstm([]ts.Tensor{inState.(LSTMState).Tensor1, inState.(LSTMState).Tensor2}, l.flatWeights, l.config.HasBiases, l.config.NumLayers, l.config.Dropout, l.config.Train, l.config.Bidirectional, l.config.BatchFirst)
 return output, LSTMState{
 Tensor1: h,
@@ -229,7 +229,7 @@ func (g GRU) ZeroState(batchDim int64) (retVal State) {
 layerDim := g.config.NumLayers * numDirections
 shape := []int64{layerDim, batchDim, g.hiddenDim}
- tensor := ts.MustZeros(shape, gotch.Float.CInt(), g.device.CInt())
+ tensor := ts.MustZeros(shape, gotch.Float, g.device)
 return GRUState{Tensor: tensor}
 }
@@ -252,7 +252,7 @@ func (g GRU) Seq(input ts.Tensor) (ts.Tensor, State) {
 func (g GRU) SeqInit(input ts.Tensor, inState State) (ts.Tensor, State) {
- output, h := input.MustGRU(inState.(GRUState).Tensor, g.flatWeights, g.config.HasBiases, g.config.NumLayers, g.config.Dropout, g.config.Train, g.config.Bidirectional, g.config.BatchFirst)
+ output, h := input.MustGru(inState.(GRUState).Tensor, g.flatWeights, g.config.HasBiases, g.config.NumLayers, g.config.Dropout, g.config.Train, g.config.Bidirectional, g.config.BatchFirst)
 return output, GRUState{Tensor: h}
 }
diff --git a/nn/sequential.go b/nn/sequential.go
index f7c757a..446819e 100644
--- a/nn/sequential.go
+++ b/nn/sequential.go
@@ -258,7 +258,7 @@ func BatchAccuracyForLogits(vs VarStore, m ts.ModuleT, xs, ys ts.Tensor, d gotch
 logits := m.ForwardT(bImages, false)
 acc := logits.AccuracyForLogits(bLabels)
- sumAccuracy += acc.Values()[0] * size
+ sumAccuracy += acc.Float64Values()[0] * size
 sampleCount += size
 bImages.MustDrop()
@@ -310,7 +310,7 @@ func BatchAccuracyForLogitsIdx(vs VarStore, m ts.ModuleT, xs, ys ts.Tensor, d go
 logits := m.ForwardT(bImages, true)
 bAccuracy := logits.AccuracyForLogits(bLabels)
- accuVal := bAccuracy.Values()[0]
+ accuVal := bAccuracy.Float64Values()[0]
 bSamples := float64(xs.MustSize()[0])
 sumAccuracy += accuVal * bSamples
 sampleCount += bSamples
diff --git a/nn/varstore.go b/nn/varstore.go
index de01790..e6cc6ee 100644
--- a/nn/varstore.go
+++ b/nn/varstore.go
@@ -239,7 +239,7 @@ func (vs *VarStore) Freeze() {
 defer vs.Vars.mutex.Unlock()
 for _, v := range vs.Vars.TrainableVariables {
- _, err := v.SetRequiresGrad(false)
+ _, err := v.SetRequiresGrad(false, false)
 if err != nil {
 log.Fatalf("Freeze() Error: %v\n", err)
 }
@@ -254,7 +254,7 @@ func (vs *VarStore) Unfreeze() {
 defer vs.Vars.mutex.Unlock()
 for _, v := range vs.Vars.TrainableVariables {
- _, err := v.SetRequiresGrad(true)
+ _, err := v.SetRequiresGrad(true, false)
 if err != nil {
 log.Fatalf("Unfreeze() Error: %v\n", err)
 }
@@ -349,7 +349,7 @@ func (p *Path) add(name string, newTs ts.Tensor, trainable bool) (retVal ts.Tens
 err error
 )
 if trainable {
- tensor, err = newTs.MustShallowClone().SetRequiresGrad(true)
+ tensor, err = newTs.MustShallowClone().SetRequiresGrad(true, false)
 if err != nil {
 log.Fatalf("Path 'add' method error: %v\n", err)
 }
@@ -378,7 +378,7 @@ func (p *Path) getOrAddWithLock(name string, tensor ts.Tensor, trainable bool, v
 var err error
 var ttensor ts.Tensor
 if trainable {
- ttensor, err = tensor.SetRequiresGrad(true)
+ ttensor, err = tensor.SetRequiresGrad(true, false)
 if err != nil {
 log.Fatalf("Path - call method 'getOrAddWithLock' error: %v\n", err)
 }
@@ -403,9 +403,8 @@ func (p *Path) getOrAddWithLock(name string, tensor ts.Tensor, trainable bool, v
 // The variable uses a float tensor initialized with zeros.
 func (p *Path) ZerosNoTrain(name string, dims []int64) (retVal ts.Tensor) {
- dtype, err := gotch.DType2CInt(gotch.Float) // DType Float
- device := p.Device().CInt()
- z, err := ts.Zeros(dims, dtype, device)
+ device := p.Device()
+ z, err := ts.Zeros(dims, gotch.Float, device)
 if err != nil {
 log.Fatalf("Path - 'ZerosNoTrain' method call error: %v\n", err)
 }
@@ -421,9 +420,8 @@ func (p *Path) ZerosNoTrain(name string, dims []int64) (retVal ts.Tensor) {
 // The variable uses a float tensor initialized with ones.
 func (p *Path) OnesNoTrain(name string, dims []int64) (retVal ts.Tensor) {
- dtype, err := gotch.DType2CInt(gotch.Float) // DType Float
- device := p.Device().CInt()
- z, err := ts.Ones(dims, dtype, device)
+ device := p.Device()
+ z, err := ts.Ones(dims, gotch.Float, device)
 if err != nil {
 log.Fatalf("Path - 'OnesNoTrain' method call error: %v\n", err)
 }
@@ -610,7 +608,7 @@ func (e *Entry) OrOnes(dims []int64) (retVal ts.Tensor) {
 // OrOnesNoTrain returns the existing entry if, otherwise create a new variable.
 func (e *Entry) OrOnesNoTrain(dims []int64) (retVal ts.Tensor) {
- o := ts.MustOnes(dims, gotch.Float.CInt(), e.path.Device().CInt())
+ o := ts.MustOnes(dims, gotch.Float, e.path.Device())
 return e.path.getOrAddWithLock(e.name, o, true, *e.variables)
 }
@@ -641,7 +639,7 @@ func (e *Entry) OrZeros(dims []int64) (retVal ts.Tensor) {
 // OrZerosNoTrain returns the existing entry if, otherwise create a new variable.
 func (e *Entry) OrZerosNoTrain(dims []int64) (retVal ts.Tensor) {
- z := ts.MustZeros(dims, gotch.Float.CInt(), e.path.Device().CInt())
+ z := ts.MustZeros(dims, gotch.Float, e.path.Device())
 return e.path.getOrAddWithLock(e.name, z, true, *e.variables)
 }
diff --git a/nn/varstore_test.go b/nn/varstore_test.go
index 25e9c98..0aa1a17 100644
--- a/nn/varstore_test.go
+++ b/nn/varstore_test.go
@@ -74,10 +74,10 @@ func TestSaveLoad(t *testing.T) {
 wantU2 := float64(0.0)
 wantV2 := float64(1.0)
- gotU1 := u1.MustMean(gotch.Float.CInt(), false).Values()[0]
- gotV1 := v1.MustMean(gotch.Float.CInt(), false).Values()[0]
- gotU2 := u2.MustMean(gotch.Float.CInt(), false).Values()[0]
- gotV2 := v2.MustMean(gotch.Float.CInt(), false).Values()[0]
+ gotU1 := u1.MustMean(gotch.Float, false).Float64Values()[0]
+ gotV1 := v1.MustMean(gotch.Float, false).Float64Values()[0]
+ gotU2 := u2.MustMean(gotch.Float, false).Float64Values()[0]
+ gotV2 := v2.MustMean(gotch.Float, false).Float64Values()[0]
 if !reflect.DeepEqual(wantU1, gotU1) {
 t.Errorf("Expected u1: %v\n", wantU1)
@@ -109,8 +109,8 @@ func TestSaveLoad(t *testing.T) {
 wantU2 = float64(42.0)
 wantV2 = float64(2.0)
- gotU2 = u2.MustMean(gotch.Float.CInt(), false).Values()[0]
- gotV2 = v2.MustMean(gotch.Float.CInt(), false).Values()[0]
+ gotU2 = u2.MustMean(gotch.Float, false).Float64Values()[0]
+ gotV2 = v2.MustMean(gotch.Float, false).Float64Values()[0]
 if !reflect.DeepEqual(wantU1, gotU1) {
 t.Errorf("Expected u1: %v\n", wantU1)
diff --git a/tensor/jit_test.go b/tensor/jit_test.go
index 6183a7f..187b4ef 100644
--- a/tensor/jit_test.go
+++ b/tensor/jit_test.go
@@ -59,7 +59,7 @@ func TestModuleForwardTs(t *testing.T) {
 if err != nil {
 t.Error(err)
 }
- got := int(res.Values()[0])
+ got := int(res.Float64Values()[0])
 want := 1421
diff --git a/tensor/macro.go b/tensor/macro.go
deleted file mode 100644
index 04a7d37..0000000
--- a/tensor/macro.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package tensor
-
-// TODO: implement tensor.From macro
diff --git a/tensor/must-tensor-generated.go b/tensor/must-tensor-generated.go
new file mode 100644
index 0000000..dd93c89
--- /dev/null
+++ b/tensor/must-tensor-generated.go
@@ -0,0 +1,8043 @@
+package tensor
+
+// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
+ +import( + "log" + + "github.com/sugarme/gotch" +) + + +func(ts Tensor) Must__And_(other Scalar)() { + + err := ts.__And_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__And1(other Tensor)() { + + err := ts.__And1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Iand_(other Scalar)() { + + err := ts.__Iand_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Iand1(other Tensor)() { + + err := ts.__Iand1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Ilshift_(other Scalar)() { + + err := ts.__Ilshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Ilshift1(other Tensor)() { + + err := ts.__Ilshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Ior_(other Scalar)() { + + err := ts.__Ior_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Ior1(other Tensor)() { + + err := ts.__Ior1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Irshift_(other Scalar)() { + + err := ts.__Irshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Irshift1(other Tensor)() { + + err := ts.__Irshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Ixor_(other Scalar)() { + + err := ts.__Ixor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Ixor1(other Tensor)() { + + err := ts.__Ixor1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Lshift_(other Scalar)() { + + err := ts.__Lshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Lshift1(other Tensor)() { + + err := ts.__Lshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Or_(other Scalar)() { + + err := ts.__Or_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Or1(other Tensor)() { + + err := ts.__Or1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Rshift_(other Scalar)() { + + err := ts.__Rshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Rshift1(other Tensor)() { + + err := ts.__Rshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Xor_(other Scalar)() { + + err := ts.__Xor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must__Xor1(other Tensor)() { + + err := ts.__Xor1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts._AdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Addr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._Addr(vec1, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Addr_(vec1 Tensor, vec2 Tensor)() { + + err := ts._Addr_(vec1, vec2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_AddrOut(out Tensor, vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._AddrOut(out, vec1, vec2, del) + if err != nil { log.Fatal(err) } + + 
return retVal +} + +func Must_AmpUpdateScale(growthTracker Tensor, currentScale Tensor, foundInf Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)(retVal Tensor) { + + retVal, err := _AmpUpdateScale(growthTracker, currentScale, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_BaddbmmMkl_(batch1 Tensor, batch2 Tensor)() { + + err := ts._BaddbmmMkl_(batch1, batch2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_CastByte(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastByte(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CastChar(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastChar(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CastDouble(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastDouble(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CastFloat(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastFloat(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CastHalf(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastHalf(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CastInt(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastInt(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CastLong(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastLong(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CastShort(nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CastShort(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Cat(tensors []Tensor, dim int64)(retVal Tensor) { + + retVal, err := _Cat(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor) { + + retVal, err := _CatOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CdistBackward(grad Tensor, x1 Tensor, x2 Tensor, p float64, cdist Tensor)(retVal Tensor) { + + retVal, err := _CdistBackward(grad, x1, x2, p, cdist) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CholeskyHelper(upper bool, del bool)(retVal Tensor) { + + retVal, err := ts._CholeskyHelper(upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CholeskySolveHelper(a Tensor, upper bool, del bool)(retVal Tensor) { + + retVal, err := ts._CholeskySolveHelper(a, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Coalesced_(coalesced bool)() { + + err := ts._Coalesced_(coalesced) + if err != nil { log.Fatal(err) } + + return +} + +func Must_Convolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal Tensor) { + + retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled) + if err != nil { log.Fatal(err) } + + 
return retVal +} + +func Must_ConvolutionNogroup(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64)(retVal Tensor) { + + retVal, err := _ConvolutionNogroup(input, weight, bias, stride, padding, dilation, transposed, outputPadding) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CopyFrom(dst Tensor, nonBlocking bool, del bool)(retVal Tensor) { + + retVal, err := ts._CopyFrom(dst, nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CtcLossBackward(grad Tensor, logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood Tensor, logAlpha Tensor, blank int64, zeroInfinity bool)(retVal Tensor) { + + retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal Tensor) { + + retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, numLayers, batchFirst, bidirectional) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Cumprod(dim int64, del bool)(retVal Tensor) { + + retVal, err := ts._Cumprod(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CumprodOut(out Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts._CumprodOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Cumsum(dim int64, del bool)(retVal Tensor) { + + retVal, err := ts._Cumsum(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_CumsumOut(out Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts._CumsumOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_DimArange(like Tensor, dim int64)(retVal Tensor) { + + retVal, err := _DimArange(like, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_DirichletGrad(x Tensor, alpha Tensor, total Tensor)(retVal Tensor) { + + retVal, err := _DirichletGrad(x, alpha, total) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights Tensor)(retVal Tensor) { + + retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagDenseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor) { + + retVal, err := _EmbeddingBagDenseBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, 
scaleGradByFreq, mode, perSampleWeights) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagPerSampleWeightsBackward(grad Tensor, weight Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, mode int64)(retVal Tensor) { + + retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagSparseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor) { + + retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal Tensor) { + + retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmptyPerChannelAffineQuantized(size []int64, scales Tensor, zeroPoints Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool)(retVal Tensor) { + + retVal, err := ts._FftWithSize(signalNdim, complexInput, complexOutput, inverse, checkedSignalSizes, normalized, onesided, outputSizes, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_GatherSparseBackward(dim int64, index Tensor, grad Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._GatherSparseBackward(dim, index, grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_IndexCopy_(dim int64, index Tensor, source Tensor)() { + + err := ts._IndexCopy_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_IndexPutImpl_(indices []Tensor, values Tensor, accumulate bool, unsafety bool)() { + + err := ts._IndexPutImpl_(indices, values, accumulate, unsafety) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_Indices(del bool)(retVal Tensor) { + + retVal, err := ts._Indices(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_InverseHelper(del bool)(retVal Tensor) { + + retVal, err := ts._InverseHelper(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal Tensor) { + + retVal, err := ts._LogSoftmax(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_LogSoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts._LogSoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_LuSolveHelper(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._LuSolveHelper(lUData, lUPivots, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) 
Must_MakePerChannelQuantizedTensor(scale Tensor, zeroPoint Tensor, axis int64, del bool)(retVal Tensor) { + + retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal Tensor) { + + retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_MaskedScale(mask Tensor, scale float64, del bool)(retVal Tensor) { + + retVal, err := ts._MaskedScale(mask, scale, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_MkldnnReshape(shape []int64, del bool)(retVal Tensor) { + + retVal, err := ts._MkldnnReshape(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal Tensor) { + + retVal, err := ts._MkldnnTranspose(dim0, dim1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64)() { + + err := ts._MkldnnTranspose_(dim0, dim1) + if err != nil { log.Fatal(err) } + + return +} + +func Must_MultinomialAliasDraw(j Tensor, q Tensor, numSamples int64)(retVal Tensor) { + + retVal, err := _MultinomialAliasDraw(j, q, numSamples) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackSpatialConvolution(input Tensor, weight Tensor, bias Tensor, padding []int64, stride []int64)(retVal Tensor) { + + retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackSpatialConvolutionBackwardInput(input Tensor, gradOutput Tensor, weight Tensor, padding []int64)(retVal Tensor) { + + retVal, err := _NnpackSpatialConvolutionBackwardInput(input, gradOutput, weight, padding) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackSpatialConvolutionBackwardWeight(input Tensor, weightsize []int64, gradOutput Tensor, padding []int64)(retVal Tensor) { + + retVal, err := _NnpackSpatialConvolutionBackwardWeight(input, weightsize, gradOutput, padding) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_PackPaddedSequenceBackward(grad Tensor, inputSize []int64, batchSizes Tensor, batchFirst bool)(retVal Tensor) { + + retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_PdistBackward(grad Tensor, p float64, pdist Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._PdistBackward(grad, p, pdist, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_ReshapeFromTensor(shape Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._ReshapeFromTensor(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SWhere(condition Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._SWhere(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SampleDirichlet(del bool)(retVal Tensor) { + + retVal, err := ts._SampleDirichlet(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_ShapeAsTensor(del bool)(retVal Tensor) { + + retVal, err := ts._ShapeAsTensor(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SobolEngineFf_(n int64, sobolstate 
Tensor, dimension int64, numGenerated int64)() { + + err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_SobolEngineInitializeState_(dimension int64)() { + + err := ts._SobolEngineInitializeState_(dimension) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_SobolEngineScramble_(ltm Tensor, dimension int64)() { + + err := ts._SobolEngineScramble_(ltm, dimension) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool)(retVal Tensor) { + + retVal, err := ts._Softmax(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts._SoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SparseAddmm(sparse Tensor, dense Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._SparseAddmm(sparse, dense, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorUnsafe(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseMm(sparse Tensor, dense Tensor)(retVal Tensor) { + + retVal, err := _SparseMm(sparse, dense) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SparseSum(del bool)(retVal Tensor) { + + retVal, err := ts._SparseSum(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SparseSum1(dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts._SparseSum1(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SparseSum2(dim []int64, del bool)(retVal Tensor) { + + retVal, err := ts._SparseSum2(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SparseSum3(dim []int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts._SparseSum3(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_SparseSumBackward(grad Tensor, dim []int64, del bool)(retVal Tensor) { + + retVal, err := ts._SparseSumBackward(grad, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_StandardGamma(del bool)(retVal Tensor) { + + retVal, err := ts._StandardGamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_StandardGammaGrad(output Tensor, del bool)(retVal Tensor) { + + retVal, err := ts._StandardGammaGrad(output, del) + if err 
!= nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Std(unbiased bool, del bool)(retVal Tensor) { + + retVal, err := ts._Std(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Trilinear(i1 Tensor, i2 Tensor, i3 Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal Tensor) { + + retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_UnsafeView(size []int64, del bool)(retVal Tensor) { + + retVal, err := ts._UnsafeView(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Values(del bool)(retVal Tensor) { + + retVal, err := ts._Values(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) Must_Var(unbiased bool, del bool)(retVal Tensor) { + + retVal, err := ts._Var(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_WeightNorm(v Tensor, g Tensor, dim int64)(retVal Tensor) { + + retVal, err := _WeightNorm(v, g, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAbs(del bool)(retVal Tensor) { + + retVal, err := ts.Abs(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAbs_()() { + + err := ts.Abs_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAbsOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AbsOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAcos(del bool)(retVal Tensor) { + + retVal, err := ts.Acos(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAcos_()() { + + err := ts.Acos_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAcosOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AcosOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveAvgPool1d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveAvgPool2dOut(out Tensor, outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveAvgPool3d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveAvgPool3dBackward(gradOutput Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveAvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dBackwardOut(gradInput, gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveAvgPool3dOut(out Tensor, outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) 
MustAdaptiveMaxPool2dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveMaxPool2dBackwardOut(gradInput, gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveMaxPool3dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdaptiveMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AdaptiveMaxPool3dBackwardOut(gradInput, gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdd(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Add(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdd1(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Add1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAdd_(other Tensor)() { + + err := ts.Add_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAdd1_(other Scalar)() { + + err := ts.Add1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAddOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AddOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Addbmm(batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddbmm_(batch1 Tensor, batch2 Tensor)() { + + err := ts.Addbmm_(batch1, batch2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AddbmmOut(out, batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddcdiv(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Addcdiv(tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddcdiv_(tensor1 Tensor, tensor2 Tensor)() { + + err := ts.Addcdiv_(tensor1, tensor2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAddcdivOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddcmul(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Addcmul(tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddcmul_(tensor1 Tensor, tensor2 Tensor)() { + + err := ts.Addcmul_(tensor1, tensor2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAddcmulOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddmm(mat1 Tensor, mat2 Tensor, del 
bool)(retVal Tensor) { + + retVal, err := ts.Addmm(mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddmm_(mat1 Tensor, mat2 Tensor)() { + + err := ts.Addmm_(mat1, mat2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AddmmOut(out, mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddmv(mat Tensor, vec Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Addmv(mat, vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddmv_(mat Tensor, vec Tensor)() { + + err := ts.Addmv_(mat, vec) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAddmvOut(out Tensor, mat Tensor, vec Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AddmvOut(out, mat, vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Addr(vec1, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAddr_(vec1 Tensor, vec2 Tensor)() { + + err := ts.Addr_(vec1, vec2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAddrOut(out Tensor, vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AddrOut(out, vec1, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustAffineGridGenerator(theta Tensor, size []int64, alignCorners bool)(retVal Tensor) { + + retVal, err := AffineGridGenerator(theta, size, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustAffineGridGeneratorBackward(grad Tensor, size []int64, alignCorners bool)(retVal Tensor) { + + retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAlias(del bool)(retVal Tensor) { + + retVal, err := ts.Alias(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAlignAs(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AlignAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAll(del bool)(retVal Tensor) { + + retVal, err := ts.All(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAll1(dim int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.All1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAllOut(out Tensor, dim int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.AllOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustAlphaDropout(input Tensor, p float64, train bool)(retVal Tensor) { + + retVal, err := AlphaDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAlphaDropout_(p float64, train bool)() { + + err := ts.AlphaDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAngle(del bool)(retVal Tensor) { + + retVal, err := ts.Angle(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAngleOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AngleOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAny(del bool)(retVal Tensor) { + + retVal, err := ts.Any(del) + if err != 
nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAny1(dim int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.Any1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAnyOut(out Tensor, dim int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.AnyOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArange(end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Arange(end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArange1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Arange1(start, end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArange2(start Scalar, end Scalar, step Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Arange2(start, end, step, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArangeOut(out Tensor, end Scalar)(retVal Tensor) { + + retVal, err := ArangeOut(out, end) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArangeOut1(out Tensor, start Scalar, end Scalar)(retVal Tensor) { + + retVal, err := ArangeOut1(out, start, end) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustArgmax(dim int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.Argmax(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustArgmin(dim int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.Argmin(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustArgsort(dim int64, descending bool, del bool)(retVal Tensor) { + + retVal, err := ts.Argsort(dim, descending, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAsStrided(size []int64, stride []int64, storageOffset int64, del bool)(retVal Tensor) { + + retVal, err := ts.AsStrided(size, stride, storageOffset, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset int64)() { + + err := ts.AsStrided_(size, stride, storageOffset) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAsin(del bool)(retVal Tensor) { + + retVal, err := ts.Asin(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAsin_()() { + + err := ts.Asin_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAsinOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AsinOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAtan(del bool)(retVal Tensor) { + + retVal, err := ts.Atan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAtan2(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Atan2(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAtan2_(other Tensor)() { + + err := ts.Atan2_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAtan2Out(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Atan2Out(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAtan_()() { + + err 
:= ts.Atan_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustAtanOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.AtanOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool2dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool2dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool2dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool3dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool3dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustAvgPool3dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor) { + + retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts 
Tensor) MustBaddbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Baddbmm(batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBaddbmm_(batch1 Tensor, batch2 Tensor)() { + + err := ts.Baddbmm_(batch1, batch2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBaddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BaddbmmOut(out, batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := BartlettWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor) { + + retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNormBackwardElemt(gradOut Tensor, input Tensor, mean Tensor, invstd Tensor, weight Tensor, meanDy Tensor, meanDyXmu Tensor)(retVal Tensor) { + + retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNormElemt(input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor) { + + retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNormElemtOut(out Tensor, input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor) { + + retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBernoulli(del bool)(retVal Tensor) { + + retVal, err := ts.Bernoulli(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBernoulli1(p float64, del bool)(retVal Tensor) { + + retVal, err := ts.Bernoulli1(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBernoulli_(p Tensor)() { + + err := ts.Bernoulli_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBernoulli1_(p float64)() { + + err := ts.Bernoulli1_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBernoulliOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BernoulliOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBilinear(input1 Tensor, input2 Tensor, weight Tensor, bias Tensor)(retVal Tensor) { + + retVal, err := Bilinear(input1, input2, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBinaryCrossEntropy(target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBinaryCrossEntropyBackward(gradOutput 
Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBinaryCrossEntropyBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.BinaryCrossEntropyBackwardOut(gradInput, gradOutput, target, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBinaryCrossEntropyOut(out Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBinaryCrossEntropyWithLogits(target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput Tensor, target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBincount(weights Tensor, minlength int64, del bool)(retVal Tensor) { + + retVal, err := ts.Bincount(weights, minlength, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseAnd(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseAnd(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseAnd1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseAnd1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseAnd_(other Scalar)() { + + err := ts.BitwiseAnd_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBitwiseAnd1_(other Tensor)() { + + err := ts.BitwiseAnd1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBitwiseAndOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseAndOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseAndOut1(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseAndOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseNot(del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseNot(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseNot_()() { + + err := ts.BitwiseNot_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBitwiseNotOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseNotOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseOr(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseOr(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseOr1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseOr1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseOr_(other Scalar)() { + + err := 
ts.BitwiseOr_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBitwiseOr1_(other Tensor)() { + + err := ts.BitwiseOr1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBitwiseOrOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseOrOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseOrOut1(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseOrOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseXor(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseXor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseXor1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseXor1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseXor_(other Scalar)() { + + err := ts.BitwiseXor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBitwiseXor1_(other Tensor)() { + + err := ts.BitwiseXor1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustBitwiseXorOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseXorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBitwiseXorOut1(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.BitwiseXorOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := BlackmanWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBmm(mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Bmm(mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustBmmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.BmmOut(out, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCartesianProd(tensors []Tensor)(retVal Tensor) { + + retVal, err := CartesianProd(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCat(tensors []Tensor, dim int64)(retVal Tensor) { + + retVal, err := Cat(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor) { + + retVal, err := CatOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCauchy_(median float64, sigma float64)() { + + err := ts.Cauchy_(median, sigma) + if err != nil { log.Fatal(err) } + + return +} + +func MustCdist(x1 Tensor, x2 Tensor, p float64, computeMode int64)(retVal Tensor) { + + retVal, err := Cdist(x1, x2, p, computeMode) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCeil(del bool)(retVal Tensor) { + + retVal, err := ts.Ceil(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCeil_()() { + + err := ts.Ceil_() + if err != nil { 
log.Fatal(err) } + + return +} + +func(ts Tensor) MustCeilOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.CeilOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCelu(del bool)(retVal Tensor) { + + retVal, err := ts.Celu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCelu_()() { + + err := ts.Celu_() + if err != nil { log.Fatal(err) } + + return +} + +func MustChainMatmul(matrices []Tensor)(retVal Tensor) { + + retVal, err := ChainMatmul(matrices) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCholesky(upper bool, del bool)(retVal Tensor) { + + retVal, err := ts.Cholesky(upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCholeskyInverse(upper bool, del bool)(retVal Tensor) { + + retVal, err := ts.CholeskyInverse(upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCholeskyInverseOut(out Tensor, upper bool, del bool)(retVal Tensor) { + + retVal, err := ts.CholeskyInverseOut(out, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCholeskyOut(out Tensor, upper bool, del bool)(retVal Tensor) { + + retVal, err := ts.CholeskyOut(out, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCholeskySolve(input2 Tensor, upper bool, del bool)(retVal Tensor) { + + retVal, err := ts.CholeskySolve(input2, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCholeskySolveOut(out Tensor, input2 Tensor, upper bool, del bool)(retVal Tensor) { + + retVal, err := ts.CholeskySolveOut(out, input2, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustClamp(min Scalar, max Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Clamp(min, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustClamp_(min Scalar, max Scalar)() { + + err := ts.Clamp_(min, max) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustClampMax(max Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.ClampMax(max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustClampMax_(max Scalar)() { + + err := ts.ClampMax_(max) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustClampMaxOut(out Tensor, max Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.ClampMaxOut(out, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustClampMin(min Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.ClampMin(min, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustClampMin_(min Scalar)() { + + err := ts.ClampMin_(min) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustClampMinOut(out Tensor, min Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.ClampMinOut(out, min, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustClampOut(out Tensor, min Scalar, max Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.ClampOut(out, min, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCoalesce(del bool)(retVal Tensor) { + + retVal, err := ts.Coalesce(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del 
bool)(retVal Tensor) { + + retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCol2imBackward(gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { + + retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCol2imBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { + + retVal, err := Col2imBackwardOut(gradInput, gradOutput, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCol2imOut(out Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCombinations(r int64, withReplacement bool, del bool)(retVal Tensor) { + + retVal, err := ts.Combinations(r, withReplacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustConj(del bool)(retVal Tensor) { + + retVal, err := ts.Conj(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustConjOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ConjOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustConstantPadNd(pad []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ConstantPadNd(pad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustContiguous(del bool)(retVal Tensor) { + + retVal, err := ts.Contiguous(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConv1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor) { + + retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConv2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor) { + + retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConv3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor) { + + retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustConvTbc(weight Tensor, bias Tensor, pad int64, del bool)(retVal Tensor) { + + retVal, err := ts.ConvTbc(weight, bias, pad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvTranspose1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor) { + + retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvTranspose2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor) { + + retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, 
outputPadding, groups, dilation) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvTranspose3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor) { + + retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor) { + + retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvolutionOverrideable(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor) { + + retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCopySparseToSparse_(src Tensor, nonBlocking bool)() { + + err := ts.CopySparseToSparse_(src, nonBlocking) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustCos(del bool)(retVal Tensor) { + + retVal, err := ts.Cos(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCos_()() { + + err := ts.Cos_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustCosOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.CosOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCosh(del bool)(retVal Tensor) { + + retVal, err := ts.Cosh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCosh_()() { + + err := ts.Cosh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustCoshOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.CoshOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCosineEmbeddingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor) { + + retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCosineSimilarity(x1 Tensor, x2 Tensor, dim int64, eps float64)(retVal Tensor) { + + retVal, err := CosineSimilarity(x1, x2, dim, eps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCross(other Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.Cross(other, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCrossOut(out Tensor, other Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.CrossOut(out, other, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCtcLoss(logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal Tensor) { + + retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCtcLoss1(logProbs Tensor, targets Tensor, inputLengths Tensor, targetLengths Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal Tensor) { + + retVal, err := CtcLoss1(logProbs, 
targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnAffineGridGenerator(theta Tensor, n int64, c int64, h int64, w int64)(retVal Tensor) { + + retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnAffineGridGeneratorBackward(grad Tensor, n int64, c int64, h int64, w int64)(retVal Tensor) { + + retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCudnnConvolution(weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCudnnConvolution1(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.CudnnConvolution1(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { + + retVal, err := CudnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCudnnConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.CudnnConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCudnnConvolutionTranspose(weight Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCudnnConvolutionTranspose1(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.CudnnConvolutionTranspose1(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { + + retVal, err := CudnnConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark 
bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.CudnnConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCudnnGridSampler(grid Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.CudnnGridSampler(grid, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Cumprod(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCumprodOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.CumprodOut(out, dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Cumsum(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustCumsumOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.CumsumOut(out, dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustData(del bool)(retVal Tensor) { + + retVal, err := ts.Data(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDequantize(del bool)(retVal Tensor) { + + retVal, err := ts.Dequantize(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDet(del bool)(retVal Tensor) { + + retVal, err := ts.Det(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDetach(del bool)(retVal Tensor) { + + retVal, err := ts.Detach(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDetach_()() { + + err := ts.Detach_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustDiag(diagonal int64, del bool)(retVal Tensor) { + + retVal, err := ts.Diag(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor) { + + retVal, err := ts.DiagEmbed(offset, dim1, dim2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDiagOut(out Tensor, diagonal int64, del bool)(retVal Tensor) { + + retVal, err := ts.DiagOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDiagflat(offset int64, del bool)(retVal Tensor) { + + retVal, err := ts.Diagflat(offset, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor) { + + retVal, err := ts.Diagonal(offset, dim1, dim2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDigamma(del bool)(retVal Tensor) { + + retVal, err := ts.Digamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDigamma_()() { + + err := ts.Digamma_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustDigammaOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.DigammaOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDist(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Dist(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDiv(other Tensor, del 
bool)(retVal Tensor) { + + retVal, err := ts.Div(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDiv1(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Div1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDiv_(other Tensor)() { + + err := ts.Div_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustDiv1_(other Scalar)() { + + err := ts.Div1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustDivOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.DivOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDot(tensor Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Dot(tensor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDotOut(out Tensor, tensor Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.DotOut(out, tensor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDropout(input Tensor, p float64, train bool)(retVal Tensor) { + + retVal, err := Dropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustDropout_(p float64, train bool)() { + + err := ts.Dropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func MustEinsum(equation string, tensors []Tensor)(retVal Tensor) { + + retVal, err := Einsum(equation, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustElu(del bool)(retVal Tensor) { + + retVal, err := ts.Elu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustElu_()() { + + err := ts.Elu_() + if err != nil { log.Fatal(err) } + + return +} + +func MustEluBackward(gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor) { + + retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEluBackwardOut(gradInput Tensor, gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor) { + + retVal, err := EluBackwardOut(gradInput, gradOutput, alpha, scale, inputScale, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustEluOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.EluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbedding(weight Tensor, indices Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor) { + + retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbeddingBackward(grad Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor) { + + retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbeddingDenseBackward(gradOutput Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor) { + + retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustEmbeddingRenorm_(indices Tensor, maxNorm float64, normType float64)() { + + err := ts.EmbeddingRenorm_(indices, maxNorm, normType) + if err != nil { 
log.Fatal(err) } + + return +} + +func MustEmbeddingSparseBackward(grad Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor) { + + retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Empty(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustEmptyLike(del bool)(retVal Tensor) { + + retVal, err := ts.EmptyLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyOut(out Tensor, size []int64)(retVal Tensor) { + + retVal, err := EmptyOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustEq(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Eq(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustEq1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Eq1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustEq_(other Scalar)() { + + err := ts.Eq_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustEq1_(other Tensor)() { + + err := ts.Eq1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustEqOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.EqOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustEqOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.EqOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustErf(del bool)(retVal Tensor) { + + retVal, err := ts.Erf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustErf_()() { + + err := ts.Erf_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustErfOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ErfOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustErfc(del bool)(retVal Tensor) { + + retVal, err := ts.Erfc(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustErfc_()() { + + err := ts.Erfc_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustErfcOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ErfcOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustErfinv(del bool)(retVal Tensor) { + + retVal, err := ts.Erfinv(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustErfinv_()() { + + err := ts.Erfinv_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustErfinvOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ErfinvOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustExp(del bool)(retVal Tensor) { + + retVal, err := ts.Exp(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustExp_()() { + + err := ts.Exp_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) 
MustExpOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ExpOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustExpand(size []int64, implicit bool, del bool)(retVal Tensor) { + + retVal, err := ts.Expand(size, implicit, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustExpandAs(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ExpandAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustExpm1(del bool)(retVal Tensor) { + + retVal, err := ts.Expm1(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustExpm1_()() { + + err := ts.Expm1_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustExpm1Out(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Expm1Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustExponential_(lambd float64)() { + + err := ts.Exponential_(lambd) + if err != nil { log.Fatal(err) } + + return +} + +func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Eye(n, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEye1(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Eye1(n, m, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEyeOut(out Tensor, n int64)(retVal Tensor) { + + retVal, err := EyeOut(out, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEyeOut1(out Tensor, n int64, m int64)(retVal Tensor) { + + retVal, err := EyeOut1(out, n, m) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFakeQuantizePerChannelAffine(scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { + + retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFakeQuantizePerChannelAffineBackward(grad Tensor, scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { + + retVal, err := ts.FakeQuantizePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFakeQuantizePerTensorAffineBackward(grad Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearFp16Weight(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor) { + + retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearFp16WeightFp32Activation(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor) { + + retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + 
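All of the generated wrappers above follow one pattern: call the fallible method, exit via log.Fatal on error, and (for value-returning variants) take a trailing del flag that drops the receiver once the result is produced. A minimal usage sketch, not part of this patch: the import paths github.com/sugarme/gotch and github.com/sugarme/gotch/tensor (aliased ts) are assumed from the repository layout, and the tensor values are only illustrative.

package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Build a small float tensor on CPU with wrappers defined in this file
	// (MustArange with end=5 should give [0 1 2 3 4]).
	x := ts.MustArange(ts.FloatScalar(5), gotch.Float, gotch.CPU)

	// del=false keeps x alive for reuse; del=true on the second call drops the
	// intermediate tensor returned by MustAdd1, so only x and y need MustDrop.
	y := x.MustAdd1(ts.FloatScalar(0.5), false).MustFloor(true)

	fmt.Println(y.Float64Values()) // expected: [0 1 2 3 4]

	y.MustDrop()
	x.MustDrop()
}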
+func MustFbgemmLinearInt8Weight(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor) { + + retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearInt8WeightFp32Activation(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor) { + + retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackGemmMatrixFp16(input Tensor)(retVal Tensor) { + + retVal, err := FbgemmPackGemmMatrixFp16(input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackQuantizedMatrix(input Tensor)(retVal Tensor) { + + retVal, err := FbgemmPackQuantizedMatrix(input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackQuantizedMatrix1(input Tensor, k int64, n int64)(retVal Tensor) { + + retVal, err := FbgemmPackQuantizedMatrix1(input, k, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFeatureAlphaDropout(input Tensor, p float64, train bool)(retVal Tensor) { + + retVal, err := FeatureAlphaDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFeatureAlphaDropout_(p float64, train bool)() { + + err := ts.FeatureAlphaDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func MustFeatureDropout(input Tensor, p float64, train bool)(retVal Tensor) { + + retVal, err := FeatureDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFeatureDropout_(p float64, train bool)() { + + err := ts.FeatureDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFft(signalNdim int64, normalized bool, del bool)(retVal Tensor) { + + retVal, err := ts.Fft(signalNdim, normalized, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFill_(value Scalar)() { + + err := ts.Fill_(value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFill1_(value Tensor)() { + + err := ts.Fill1_(value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFillDiagonal_(fillValue Scalar, wrap bool)() { + + err := ts.FillDiagonal_(fillValue, wrap) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFlatten(startDim int64, endDim int64, del bool)(retVal Tensor) { + + retVal, err := ts.Flatten(startDim, endDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFlip(dims []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Flip(dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFloor(del bool)(retVal Tensor) { + + retVal, err := ts.Floor(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFloor_()() { + + err := ts.Floor_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFloorDivide(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FloorDivide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFloorDivide1(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.FloorDivide1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
+func(ts Tensor) MustFloorDivide_(other Tensor)() { + + err := ts.FloorDivide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFloorDivide1_(other Scalar)() { + + err := ts.FloorDivide1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFloorDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FloorDivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFloorOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FloorOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFmod(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Fmod(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFmod1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Fmod1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFmod_(other Scalar)() { + + err := ts.Fmod_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFmod1_(other Tensor)() { + + err := ts.Fmod1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFmodOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.FmodOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFmodOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FmodOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFrac(del bool)(retVal Tensor) { + + retVal, err := ts.Frac(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFrac_()() { + + err := ts.Frac_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustFracOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FracOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFractionalMaxPool2dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFractionalMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFractionalMaxPool3dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFractionalMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFrobeniusNorm(del bool)(retVal Tensor) { + + retVal, err := ts.FrobeniusNorm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFrobeniusNorm1(dim []int64, keepdim bool, del 
bool)(retVal Tensor) { + + retVal, err := ts.FrobeniusNorm1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFrobeniusNormOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFull(size []int64, fillValue Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Full(size, fillValue, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustFullLike(fillValue Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.FullLike(fillValue, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFullOut(out Tensor, size []int64, fillValue Scalar)(retVal Tensor) { + + retVal, err := FullOut(out, size, fillValue) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGather(dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor) { + + retVal, err := ts.Gather(dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGatherOut(out Tensor, dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor) { + + retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGe(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Ge(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGe1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Ge1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGe_(other Scalar)() { + + err := ts.Ge_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustGe1_(other Tensor)() { + + err := ts.Ge1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustGeOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.GeOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGeOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.GeOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGelu(del bool)(retVal Tensor) { + + retVal, err := ts.Gelu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGeluBackward(grad Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.GeluBackward(grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGeometric_(p float64)() { + + err := ts.Geometric_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustGer(vec2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Ger(vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGerOut(out Tensor, vec2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.GerOut(out, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGlu(dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.Glu(dim, del) + if err != nil { log.Fatal(err) } + + return 
retVal +} + +func(ts Tensor) MustGluBackward(gradOutput Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.GluBackward(gradOutput, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGluBackwardOut(gradInput Tensor, gradOutput Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.GluBackwardOut(gradInput, gradOutput, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGluOut(out Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.GluOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGrad(del bool)(retVal Tensor) { + + retVal, err := ts.Grad(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor) { + + retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler2d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor) { + + retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler3d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor) { + + retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGroupNorm(input Tensor, numGroups int64, weight Tensor, bias Tensor, eps float64, cudnnEnabled bool)(retVal Tensor) { + + retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor) { + + retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGt(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Gt(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGt1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Gt1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGt_(other Scalar)() { + + err := ts.Gt_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustGt1_(other Tensor)() { + + err := ts.Gt1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustGtOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.GtOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustGtOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.GtOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := HammingWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal 
+} + +func MustHammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := HammingWindow2(windowLength, periodic, alpha, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := HammingWindow3(windowLength, periodic, alpha, beta, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := HannWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := HannWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardshrink(del bool)(retVal Tensor) { + + retVal, err := ts.Hardshrink(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardshrinkBackward(gradOut Tensor, lambd Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.HardshrinkBackward(gradOut, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardsigmoid(del bool)(retVal Tensor) { + + retVal, err := ts.Hardsigmoid(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardsigmoid_()() { + + err := ts.Hardsigmoid_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustHardsigmoidBackward(gradOutput Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.HardsigmoidBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardsigmoidOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.HardsigmoidOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardtanh(del bool)(retVal Tensor) { + + retVal, err := ts.Hardtanh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardtanh_()() { + + err := ts.Hardtanh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustHardtanhBackward(gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardtanhBackwardOut(gradInput Tensor, gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.HardtanhBackwardOut(gradInput, gradOutput, minVal, maxVal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHardtanhOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.HardtanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHingeEmbeddingLoss(target Tensor, margin float64, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustHistc(bins int64, del bool)(retVal Tensor) { + + retVal, err := ts.Histc(bins, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) 
MustHistcOut(out Tensor, bins int64, del bool)(retVal Tensor) { + + retVal, err := ts.HistcOut(out, bins, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHspmm(mat1 Tensor, mat2 Tensor)(retVal Tensor) { + + retVal, err := Hspmm(mat1, mat2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHspmmOut(out Tensor, mat1 Tensor, mat2 Tensor)(retVal Tensor) { + + retVal, err := HspmmOut(out, mat1, mat2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIfft(signalNdim int64, normalized bool, del bool)(retVal Tensor) { + + retVal, err := ts.Ifft(signalNdim, normalized, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIm2colBackward(gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { + + retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIm2colBackwardOut(gradInput Tensor, gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor) { + + retVal, err := Im2colBackwardOut(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIm2colOut(out Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustImag(del bool)(retVal Tensor) { + + retVal, err := ts.Imag(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndex(indices []Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Index(indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndexAdd(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.IndexAdd(dim, index, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndexAdd_(dim int64, index Tensor, source Tensor)() { + + err := ts.IndexAdd_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustIndexCopy(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.IndexCopy(dim, index, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndexCopy_(dim int64, index Tensor, source Tensor)() { + + err := ts.IndexCopy_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustIndexFill(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.IndexFill(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndexFill1(dim int64, index Tensor, value Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.IndexFill1(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndexFill_(dim int64, index Tensor, value Scalar)() { + + err := ts.IndexFill_(dim, index, value) + if err != nil { log.Fatal(err) } + + return 
+} + +func(ts Tensor) MustIndexFill1_(dim int64, index Tensor, value Tensor)() { + + err := ts.IndexFill1_(dim, index, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustIndexPut(indices []Tensor, values Tensor, accumulate bool, del bool)(retVal Tensor) { + + retVal, err := ts.IndexPut(indices, values, accumulate, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndexPut_(indices []Tensor, values Tensor, accumulate bool)() { + + err := ts.IndexPut_(indices, values, accumulate) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustIndexSelect(dim int64, index Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.IndexSelect(dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndexSelectOut(out Tensor, dim int64, index Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.IndexSelectOut(out, dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIndices(del bool)(retVal Tensor) { + + retVal, err := ts.Indices(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustInstanceNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor) { + + retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIntRepr(del bool)(retVal Tensor) { + + retVal, err := ts.IntRepr(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustInverse(del bool)(retVal Tensor) { + + retVal, err := ts.Inverse(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustInverseOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.InverseOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIrfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Irfft(signalNdim, normalized, onesided, signalSizes, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIsclose(other Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal Tensor) { + + retVal, err := ts.Isclose(other, rtol, atol, equalNan, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIsfinite(del bool)(retVal Tensor) { + + retVal, err := ts.Isfinite(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIsinf(del bool)(retVal Tensor) { + + retVal, err := ts.Isinf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustIsnan(del bool)(retVal Tensor) { + + retVal, err := ts.Isnan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustKlDiv(target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.KlDiv(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustKlDivBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.KlDivBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustL1Loss(target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.L1Loss(target, reduction, del) + if err != nil { log.Fatal(err) } + + 
return retVal +} + +func(ts Tensor) MustL1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustL1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.L1LossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustL1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.L1LossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLayerNorm(input Tensor, normalizedShape []int64, weight Tensor, bias Tensor, eps float64, cudnnEnable bool)(retVal Tensor) { + + retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLe(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Le(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLe1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Le1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLe_(other Scalar)() { + + err := ts.Le_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLe1_(other Tensor)() { + + err := ts.Le1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLeOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.LeOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLeOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LeOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLeakyRelu(del bool)(retVal Tensor) { + + retVal, err := ts.LeakyRelu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLeakyRelu_()() { + + err := ts.LeakyRelu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLeakyReluBackward(gradOutput Tensor, negativeSlope Scalar, selfIsResult bool, del bool)(retVal Tensor) { + + retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLeakyReluOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LeakyReluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLerp(end Tensor, weight Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Lerp(end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLerp1(end Tensor, weight Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Lerp1(end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLerp_(end Tensor, weight Scalar)() { + + err := ts.Lerp_(end, weight) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLerp1_(end Tensor, weight Tensor)() { + + err := ts.Lerp1_(end, weight) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLerpOut(out Tensor, end Tensor, weight Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.LerpOut(out, end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
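Two flavors of each op are generated here: an out-of-place method with a trailing del flag, and an in-place variant whose name ends in "_" and returns nothing. A short sketch of the difference, under the same import-path and del-semantics assumptions as the sketch above:

package main

import (
	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	x := ts.MustOnes([]int64{3, 3}, gotch.Float, gotch.CPU)

	// Out-of-place: produces a new tensor; del=false keeps x alive so it can
	// still be used (and must still be dropped) afterwards.
	y := x.MustLeakyRelu(false)

	// In-place: mutates x directly and returns nothing, so there is neither a
	// del flag nor a new tensor to manage.
	x.MustLeakyRelu_()

	y.MustDrop()
	x.MustDrop()
}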
+func(ts Tensor) MustLerpOut1(out Tensor, end Tensor, weight Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LerpOut1(out, end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLgamma(del bool)(retVal Tensor) { + + retVal, err := ts.Lgamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLgamma_()() { + + err := ts.Lgamma_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLgammaOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LgammaOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor) { + + retVal, err := Linear(input, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinspace(start Scalar, end Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinspaceOut(out Tensor, start Scalar, end Scalar, steps int64)(retVal Tensor) { + + retVal, err := LinspaceOut(out, start, end, steps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog(del bool)(retVal Tensor) { + + retVal, err := ts.Log(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog10(del bool)(retVal Tensor) { + + retVal, err := ts.Log10(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog10_()() { + + err := ts.Log10_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLog10Out(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Log10Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog1p(del bool)(retVal Tensor) { + + retVal, err := ts.Log1p(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog1p_()() { + + err := ts.Log1p_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLog1pOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Log1pOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog2(del bool)(retVal Tensor) { + + retVal, err := ts.Log2(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog2_()() { + + err := ts.Log2_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLog2Out(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Log2Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLog_()() { + + err := ts.Log_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLogNormal_(mean float64, std float64)() { + + err := ts.LogNormal_(mean, std) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLogOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogSigmoid(del bool)(retVal Tensor) { + + retVal, err := ts.LogSigmoid(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogSigmoidBackward(gradOutput Tensor, buffer Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogSigmoidBackwardOut(gradInput Tensor, gradOutput 
Tensor, buffer Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogSigmoidBackwardOut(gradInput, gradOutput, buffer, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogSigmoidOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogSigmoidOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.LogSoftmax(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogdet(del bool)(retVal Tensor) { + + retVal, err := ts.Logdet(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalAnd(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogicalAnd(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalAnd_(other Tensor)() { + + err := ts.LogicalAnd_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLogicalAndOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogicalAndOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalNot(del bool)(retVal Tensor) { + + retVal, err := ts.LogicalNot(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalNot_()() { + + err := ts.LogicalNot_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLogicalNotOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogicalNotOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalOr(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogicalOr(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalOr_(other Tensor)() { + + err := ts.LogicalOr_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLogicalOrOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogicalOrOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalXor(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogicalXor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogicalXor_(other Tensor)() { + + err := ts.LogicalXor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLogicalXorOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LogicalXorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLogspace(start Scalar, end Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLogspaceOut(out Tensor, start Scalar, end Scalar, steps int64, base float64)(retVal Tensor) { + + retVal, err := LogspaceOut(out, start, end, steps, base) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.Logsumexp(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLogsumexpOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.LogsumexpOut(out, dim, keepdim, del) + if err != 
nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLt(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Lt(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLt1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Lt1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLt_(other Scalar)() { + + err := ts.Lt_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLt1_(other Tensor)() { + + err := ts.Lt1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustLtOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.LtOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLtOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LtOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLuSolve(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LuSolve(lUData, lUPivots, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustLuSolveOut(out Tensor, lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMarginRankingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor) { + + retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaskedFill(mask Tensor, value Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.MaskedFill(mask, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaskedFill1(mask Tensor, value Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaskedFill1(mask, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaskedFill_(mask Tensor, value Scalar)() { + + err := ts.MaskedFill_(mask, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustMaskedFill1_(mask Tensor, value Tensor)() { + + err := ts.MaskedFill1_(mask, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustMaskedScatter(mask Tensor, source Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaskedScatter(mask, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaskedScatter_(mask Tensor, source Tensor)() { + + err := ts.MaskedScatter_(mask, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustMaskedSelect(mask Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaskedSelect(mask, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaskedSelectOut(out Tensor, mask Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaskedSelectOut(out, mask, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMatmul(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Matmul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMatmulOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MatmulOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMatrixPower(n int64, del bool)(retVal Tensor) { + + retVal, err := 
ts.MatrixPower(n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMatrixRank(symmetric bool, del bool)(retVal Tensor) { + + retVal, err := ts.MatrixRank(symmetric, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMatrixRank1(tol float64, symmetric bool, del bool)(retVal Tensor) { + + retVal, err := ts.MatrixRank1(tol, symmetric, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMax(del bool)(retVal Tensor) { + + retVal, err := ts.Max(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMax1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Max1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaxOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { + + retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { + + retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxPool2dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxPool2dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { + + retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxPool3dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxPool3dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool2d(indices Tensor, outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := 
ts.MaxUnpool2d(indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool2dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MaxUnpool2dBackward(gradOutput, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MaxUnpool2dBackwardOut(gradInput, gradOutput, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool2dOut(out Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool3d(indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MaxUnpool3d(indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool3dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MaxUnpool3dBackward(gradOutput, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MaxUnpool3dBackwardOut(gradInput, gradOutput, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxUnpool3dOut(out Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMaxValues(dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.MaxValues(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMean(dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Mean(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMean1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Mean1(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMeanOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMedian(del bool)(retVal Tensor) { + + retVal, err := ts.Median(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMin(del bool)(retVal Tensor) { + + retVal, err := ts.Min(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMin1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Min1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMinOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MinOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts 
Tensor) MustMinValues(dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.MinValues(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMiopenConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenConvolutionBackwardBias(gradOutput Tensor)(retVal Tensor) { + + retVal, err := MiopenConvolutionBackwardBias(gradOutput) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { + + retVal, err := MiopenConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMiopenConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.MiopenConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMiopenConvolutionTranspose(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { + + retVal, err := MiopenConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.MiopenConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMiopenDepthwiseConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor) { + + retVal, err := MiopenDepthwiseConvolutionBackwardInput(selfSize, gradOutput, weight, 
padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor) { + + retVal, err := ts.MiopenDepthwiseConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor) { + + retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMkldnnConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor) { + + retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool)(retVal Tensor) { + + retVal, err := MkldnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, biasDefined) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnLinear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor) { + + retVal, err := MkldnnLinear(input, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { + + retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor) { + + retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMm(mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Mm(mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MmOut(out, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMseLoss(target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MseLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMseLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMseLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MseLossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMseLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MseLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) 
MustMul(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Mul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMul1(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Mul1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMul_(other Tensor)() { + + err := ts.Mul_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustMul1_(other Scalar)() { + + err := ts.Mul1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustMulOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MulOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultiMarginLossBackward(gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultiMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MultiMarginLossBackwardOut(gradInput, gradOutput, target, p, margin, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultilabelMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MultilabelMarginLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultilabelMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultilabelMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MultilabelMarginLossBackwardOut(gradInput, gradOutput, target, reduction, isTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultilabelMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultinomial(numSamples int64, replacement bool, del bool)(retVal Tensor) { + + retVal, err := ts.Multinomial(numSamples, replacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMultinomialOut(out Tensor, numSamples int64, replacement bool, del bool)(retVal Tensor) { + + retVal, err := ts.MultinomialOut(out, numSamples, replacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMv(vec Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Mv(vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMvOut(out Tensor, vec Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.MvOut(out, vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMvlgamma(p int64, del bool)(retVal Tensor) { + + retVal, err := ts.Mvlgamma(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustMvlgamma_(p int64)() { + + err := ts.Mvlgamma_(p) 
+ if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustNarrow(dim int64, start int64, length int64, del bool)(retVal Tensor) { + + retVal, err := ts.Narrow(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNarrow1(dim int64, start Tensor, length int64, del bool)(retVal Tensor) { + + retVal, err := ts.Narrow1(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool)(retVal Tensor) { + + retVal, err := ts.NarrowCopy(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNativeNorm(del bool)(retVal Tensor) { + + retVal, err := ts.NativeNorm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNe(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Ne(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNe1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Ne1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNe_(other Scalar)() { + + err := ts.Ne_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustNe1_(other Tensor)() { + + err := ts.Ne1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustNeOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.NeOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNeOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.NeOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNeg(del bool)(retVal Tensor) { + + retVal, err := ts.Neg(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNeg_()() { + + err := ts.Neg_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustNegOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.NegOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor) { + + retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNewFull(size []int64, fillValue Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor) { + + retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor) { + + retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLoss(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) { + + retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLoss2d(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) { + + retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLoss2dBackward(gradOutput Tensor, target Tensor, weight 
Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLoss2dBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.NllLoss2dBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLoss2dOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) { + + retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLossBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.NllLossBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNllLossOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor) { + + retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNonzero(del bool)(retVal Tensor) { + + retVal, err := ts.Nonzero(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNonzeroOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.NonzeroOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNorm(del bool)(retVal Tensor) { + + retVal, err := ts.Norm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNorm1(p Scalar, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Norm1(p, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNorm2(p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.Norm2(p, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNorm3(p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Norm3(p, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormExceptDim(v Tensor, pow int64, dim int64)(retVal Tensor) { + + retVal, err := NormExceptDim(v, pow, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNormOut(out Tensor, p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.NormOut(out, p, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNormOut1(out Tensor, p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.NormOut1(out, p, dim, keepdim, dtype, 
del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNormal_(mean float64, std float64)() { + + err := ts.Normal_(mean, std) + if err != nil { log.Fatal(err) } + + return +} + +func MustNormalOut(out Tensor, mean Tensor, std float64)(retVal Tensor) { + + retVal, err := NormalOut(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalOut1(out Tensor, mean float64, std Tensor)(retVal Tensor) { + + retVal, err := NormalOut1(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalOut2(out Tensor, mean Tensor, std Tensor)(retVal Tensor) { + + retVal, err := NormalOut2(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalOut3(out Tensor, mean float64, std float64, size []int64)(retVal Tensor) { + + retVal, err := NormalOut3(out, mean, std, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNuclearNorm(keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.NuclearNorm(keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNuclearNorm1(dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.NuclearNorm1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNuclearNormOut(out Tensor, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.NuclearNormOut(out, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNuclearNormOut1(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.NuclearNormOut1(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustNumpyT(del bool)(retVal Tensor) { + + retVal, err := ts.NumpyT(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustOneHot(numClasses int64, del bool)(retVal Tensor) { + + retVal, err := ts.OneHot(numClasses, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Ones(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustOnesLike(del bool)(retVal Tensor) { + + retVal, err := ts.OnesLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustOnesOut(out Tensor, size []int64)(retVal Tensor) { + + retVal, err := OnesOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustOrgqr(input2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Orgqr(input2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustOrgqrOut(out Tensor, input2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.OrgqrOut(out, input2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustOrmqr(input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor) { + + retVal, err := ts.Ormqr(input2, input3, left, transpose, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustOrmqrOut(out Tensor, input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor) { + + retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPairwiseDistance(x1 Tensor, x2 Tensor, p float64, eps float64, keepdim bool)(retVal Tensor) { 
+ + retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPdist(p float64, del bool)(retVal Tensor) { + + retVal, err := ts.Pdist(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPermute(dims []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Permute(dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPinMemory(del bool)(retVal Tensor) { + + retVal, err := ts.PinMemory(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPinverse(rcond float64, del bool)(retVal Tensor) { + + retVal, err := ts.Pinverse(rcond, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPixelShuffle(upscaleFactor int64, del bool)(retVal Tensor) { + + retVal, err := ts.PixelShuffle(upscaleFactor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPoisson(del bool)(retVal Tensor) { + + retVal, err := ts.Poisson(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPoissonNllLoss(input Tensor, target Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal Tensor) { + + retVal, err := PoissonNllLoss(input, target, logInput, full, eps, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPolygamma(n int64, del bool)(retVal Tensor) { + + retVal, err := ts.Polygamma(n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPolygamma_(n int64)() { + + err := ts.Polygamma_(n) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustPolygammaOut(out Tensor, n int64, del bool)(retVal Tensor) { + + retVal, err := ts.PolygammaOut(out, n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPow(exponent Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Pow(exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPow1(exponent Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Pow1(exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPow2(selfScalar Scalar, exponent Tensor)(retVal Tensor) { + + retVal, err := Pow2(selfScalar, exponent) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPow_(exponent Scalar)() { + + err := ts.Pow_(exponent) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustPow1_(exponent Tensor)() { + + err := ts.Pow1_(exponent) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustPowOut(out Tensor, exponent Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.PowOut(out, exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPowOut1(out Tensor, exponent Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.PowOut1(out, exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPowOut2(out Tensor, selfScalar Scalar, exponent Tensor)(retVal Tensor) { + + retVal, err := PowOut2(out, selfScalar, exponent) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPrelu(weight Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Prelu(weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustProd(dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Prod(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts 
Tensor) MustProd1(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Prod1(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustProdOut(out Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.ProdOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustPut_(index Tensor, source Tensor, accumulate bool)() { + + err := ts.Put_(index, source, accumulate) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustQPerChannelScales(del bool)(retVal Tensor) { + + retVal, err := ts.QPerChannelScales(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustQPerChannelZeroPoints(del bool)(retVal Tensor) { + + retVal, err := ts.QPerChannelZeroPoints(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustQuantizePerChannel(scales Tensor, zeroPoints Tensor, axis int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedBatchNorm(input Tensor, weight Tensor, bias Tensor, mean Tensor, vari Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal Tensor) { + + retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedGruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor) { + + retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor) { + + retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedRnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor) { + + retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedRnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor) { + + retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { log.Fatal(err) } + + 
return retVal +} + +func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Rand(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRandLike(del bool)(retVal Tensor) { + + retVal, err := ts.RandLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandOut(out Tensor, size []int64)(retVal Tensor) { + + retVal, err := RandOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Randint(high, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Randint1(low, high, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRandintLike(high int64, del bool)(retVal Tensor) { + + retVal, err := ts.RandintLike(high, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRandintLike1(low int64, high int64, del bool)(retVal Tensor) { + + retVal, err := ts.RandintLike1(low, high, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandintOut(out Tensor, high int64, size []int64)(retVal Tensor) { + + retVal, err := RandintOut(out, high, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandintOut1(out Tensor, low int64, high int64, size []int64)(retVal Tensor) { + + retVal, err := RandintOut1(out, low, high, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Randn(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRandnLike(del bool)(retVal Tensor) { + + retVal, err := ts.RandnLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandnOut(out Tensor, size []int64)(retVal Tensor) { + + retVal, err := RandnOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRandom_()() { + + err := ts.Random_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRandom1_(to int64)() { + + err := ts.Random1_(to) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRandom2(from int64, to int64)() { + + err := ts.Random2(from, to) + if err != nil { log.Fatal(err) } + + return +} + +func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Randperm(n, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandpermOut(out Tensor, n int64)(retVal Tensor) { + + retVal, err := RandpermOut(out, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRange(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Range(start, end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRange1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Range1(start, end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRangeOut(out 
Tensor, start Scalar, end Scalar)(retVal Tensor) { + + retVal, err := RangeOut(out, start, end) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReal(del bool)(retVal Tensor) { + + retVal, err := ts.Real(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReciprocal(del bool)(retVal Tensor) { + + retVal, err := ts.Reciprocal(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReciprocal_()() { + + err := ts.Reciprocal_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustReciprocalOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ReciprocalOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad1d(padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad1d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad1dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad1dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad2d(padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad2d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad2dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReflectionPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReflectionPad2dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRelu(del bool)(retVal Tensor) { + + retVal, err := ts.Relu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRelu_()() { + + err := ts.Relu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRemainder(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Remainder(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRemainder1(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Remainder1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRemainder_(other Scalar)() { + + err := ts.Remainder_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRemainder1_(other Tensor)() { + + err := ts.Remainder1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRemainderOut(out Tensor, other Scalar, del bool)(retVal Tensor) { + + retVal, err := 
ts.RemainderOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRemainderOut1(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.RemainderOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRenorm(p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Renorm(p, dim, maxnorm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRenorm_(p Scalar, dim int64, maxnorm Scalar)() { + + err := ts.Renorm_(p, dim, maxnorm) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRenormOut(out Tensor, p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.RenormOut(out, p, dim, maxnorm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRepeat(repeats []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Repeat(repeats, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRepeatInterleave(repeats Tensor)(retVal Tensor) { + + retVal, err := RepeatInterleave(repeats) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRepeatInterleave1(repeats Tensor, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.RepeatInterleave1(repeats, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRepeatInterleave2(repeats int64, dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.RepeatInterleave2(repeats, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad1d(padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad1d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad1dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad1dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad2d(padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad2d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad2dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad2dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad3d(padding []int64, del bool)(retVal 
Tensor) { + + retVal, err := ts.ReplicationPad3d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad3dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad3dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad3dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReplicationPad3dOut(out Tensor, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.ReplicationPad3dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRequiresGrad_(requiresGrad bool)() { + + err := ts.RequiresGrad_(requiresGrad) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustReshape(shape []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Reshape(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustReshapeAs(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ReshapeAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustResize_(size []int64)() { + + err := ts.Resize_(size) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustResizeAs_(theTemplate Tensor)() { + + err := ts.ResizeAs_(theTemplate) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRfft(signalNdim int64, normalized bool, onesided bool, del bool)(retVal Tensor) { + + retVal, err := ts.Rfft(signalNdim, normalized, onesided, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor) { + + retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor) { + + retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRoll(shifts []int64, dims []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Roll(shifts, dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRot90(k int64, dims []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Rot90(k, dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRound(del bool)(retVal Tensor) { + + retVal, err := ts.Round(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRound_()() { + + err := ts.Round_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRoundOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.RoundOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRrelu(training bool, del bool)(retVal Tensor) { + + retVal, err := ts.Rrelu(training, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRrelu_(training bool)() { + + err := ts.Rrelu_(training) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRreluWithNoise(noise Tensor, training bool, del bool)(retVal Tensor) { + + retVal, err := ts.RreluWithNoise(noise, training, del) + if err != 
nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRreluWithNoise_(noise Tensor, training bool)() { + + err := ts.RreluWithNoise_(noise, training) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRreluWithNoiseBackward(gradOutput Tensor, noise Tensor, lower Scalar, upper Scalar, training bool, selfIsResult bool, del bool)(retVal Tensor) { + + retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRreluWithNoiseOut(out Tensor, noise Tensor, training bool, del bool)(retVal Tensor) { + + retVal, err := ts.RreluWithNoiseOut(out, noise, training, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRsqrt(del bool)(retVal Tensor) { + + retVal, err := ts.Rsqrt(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRsqrt_()() { + + err := ts.Rsqrt_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustRsqrtOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.RsqrtOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRsub(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Rsub(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustRsub1(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Rsub1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustScalarTensor(s Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := ScalarTensor(s, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustScatter(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Scatter(dim, index, src, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustScatter1(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Scatter1(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustScatter_(dim int64, index Tensor, src Tensor)() { + + err := ts.Scatter_(dim, index, src) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustScatter1_(dim int64, index Tensor, value Scalar)() { + + err := ts.Scatter1_(dim, index, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustScatterAdd(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ScatterAdd(dim, index, src, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustScatterAdd_(dim int64, index Tensor, src Tensor)() { + + err := ts.ScatterAdd_(dim, index, src) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSelect(dim int64, index int64, del bool)(retVal Tensor) { + + retVal, err := ts.Select(dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSelu(del bool)(retVal Tensor) { + + retVal, err := ts.Selu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSelu_()() { + + err := ts.Selu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSet_()() { + + err := ts.Set_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSet1_(source Tensor)() { + + err := ts.Set1_(source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) 
MustSetRequiresGrad(r bool, del bool)(retVal Tensor) { + + retVal, err := ts.SetRequiresGrad(r, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSigmoid(del bool)(retVal Tensor) { + + retVal, err := ts.Sigmoid(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSigmoid_()() { + + err := ts.Sigmoid_() + if err != nil { log.Fatal(err) } + + return +} + +func MustSigmoidBackward(gradOutput Tensor, output Tensor)(retVal Tensor) { + + retVal, err := SigmoidBackward(gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSigmoidBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor) { + + retVal, err := SigmoidBackwardOut(gradInput, gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSigmoidOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SigmoidOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSign(del bool)(retVal Tensor) { + + retVal, err := ts.Sign(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSign_()() { + + err := ts.Sign_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSignOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SignOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSin(del bool)(retVal Tensor) { + + retVal, err := ts.Sin(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSin_()() { + + err := ts.Sin_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSinOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SinOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSinh(del bool)(retVal Tensor) { + + retVal, err := ts.Sinh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSinh_()() { + + err := ts.Sinh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSinhOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SinhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlice(dim int64, start int64, end int64, step int64, del bool)(retVal Tensor) { + + retVal, err := ts.Slice(dim, start, end, step, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConv3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConv3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConvDilated2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConvDilated3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal Tensor) { + + retVal, err := 
ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConvTranspose2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConvTranspose2dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConvTranspose3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSlowConvTranspose3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSmm(mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Smm(mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSmoothL1Loss(target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SmoothL1Loss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSmoothL1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSmoothL1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SmoothL1LossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSmoothL1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SmoothL1LossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SoftMarginLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SoftMarginLossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal 
+} + +func(ts Tensor) MustSoftMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor) { + + retVal, err := ts.SoftMarginLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Softmax(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftplus(del bool)(retVal Tensor) { + + retVal, err := ts.Softplus(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftplusBackward(gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, output, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftplusBackwardOut(gradInput Tensor, gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SoftplusBackwardOut(gradInput, gradOutput, beta, threshold, output, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftplusOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SoftplusOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftshrink(del bool)(retVal Tensor) { + + retVal, err := ts.Softshrink(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftshrinkBackward(gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftshrinkBackwardOut(gradInput Tensor, gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.SoftshrinkBackwardOut(gradInput, gradOutput, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSoftshrinkOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SoftshrinkOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := SparseCooTensor(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSparseCooTensor1(indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := SparseCooTensor1(indices, values, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSparseCooTensor2(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := SparseCooTensor2(indices, values, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSparseMask(mask Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SparseMask(mask, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64)() { + + err := ts.SparseResize_(size, sparseDim, denseDim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)() { + + err := ts.SparseResizeAndClear_(size, sparseDim, denseDim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSqrt(del bool)(retVal 
Tensor) { + + retVal, err := ts.Sqrt(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSqrt_()() { + + err := ts.Sqrt_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSqrtOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SqrtOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSquare(del bool)(retVal Tensor) { + + retVal, err := ts.Square(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSquare_()() { + + err := ts.Square_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSqueeze(del bool)(retVal Tensor) { + + retVal, err := ts.Squeeze(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSqueeze1(dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.Squeeze1(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSqueeze_()() { + + err := ts.Squeeze_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSqueeze1_(dim int64)() { + + err := ts.Squeeze1_(dim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSspaddmm(mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Sspaddmm(mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSspaddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SspaddmmOut(out, mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustStack(tensors []Tensor, dim int64)(retVal Tensor) { + + retVal, err := Stack(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustStackOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor) { + + retVal, err := StackOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustStd(unbiased bool, del bool)(retVal Tensor) { + + retVal, err := ts.Std(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustStd1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.Std1(dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustStdOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustStft(nFft int64, hopLength int64, winLength int64, window Tensor, normalized bool, onesided bool, del bool)(retVal Tensor) { + + retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSub(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Sub(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSub1(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Sub1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSub_(other Tensor)() { + + err := ts.Sub_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSub1_(other Scalar)() { + + err := ts.Sub1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustSubOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.SubOut(out, other, del) + if err != nil { log.Fatal(err) 
} + + return retVal +} + +func(ts Tensor) MustSum(dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Sum(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSum1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Sum1(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSumOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.SumOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustSumToSize(size []int64, del bool)(retVal Tensor) { + + retVal, err := ts.SumToSize(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustT(del bool)(retVal Tensor) { + + retVal, err := ts.T(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustT_()() { + + err := ts.T_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustTake(index Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Take(index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTakeOut(out Tensor, index Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.TakeOut(out, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTan(del bool)(retVal Tensor) { + + retVal, err := ts.Tan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTan_()() { + + err := ts.Tan_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustTanOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.TanOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTanh(del bool)(retVal Tensor) { + + retVal, err := ts.Tanh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTanh_()() { + + err := ts.Tanh_() + if err != nil { log.Fatal(err) } + + return +} + +func MustTanhBackward(gradOutput Tensor, output Tensor)(retVal Tensor) { + + retVal, err := TanhBackward(gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTanhBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor) { + + retVal, err := TanhBackwardOut(gradInput, gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTanhOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.TanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTensordot(other Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal Tensor) { + + retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustThreshold(threshold Scalar, value Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.Threshold(threshold, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustThreshold_(threshold Scalar, value Scalar)() { + + err := ts.Threshold_(threshold, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustThresholdBackward(gradOutput Tensor, threshold Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.ThresholdBackward(gradOutput, threshold, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustThresholdOut(out Tensor, threshold Scalar, value Scalar, del bool)(retVal Tensor) { + 
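+ // `del` is forwarded unchanged to ThresholdOut; by the convention of this
+ // generated API it indicates whether the receiver tensor should be dropped
+ // after the call, which lets intermediate results be chained without leaking
+ // the underlying C memory.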
+ retVal, err := ts.ThresholdOut(out, threshold, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTo(device gotch.Device, del bool)(retVal Tensor) { + + retVal, err := ts.To(device, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTo1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal Tensor) { + + retVal, err := ts.To1(optionsKind, optionsDevice, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTo2(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor) { + + retVal, err := ts.To2(dtype, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTo3(other Tensor, nonBlocking bool, copy bool, del bool)(retVal Tensor) { + + retVal, err := ts.To3(other, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTo4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor) { + + retVal, err := ts.To4(device, dtype, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustToDense(del bool)(retVal Tensor) { + + retVal, err := ts.ToDense(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustToDenseBackward(grad Tensor, input Tensor)(retVal Tensor) { + + retVal, err := ToDenseBackward(grad, input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustToMkldnn(del bool)(retVal Tensor) { + + retVal, err := ts.ToMkldnn(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustToMkldnnBackward(grad Tensor, input Tensor)(retVal Tensor) { + + retVal, err := ToMkldnnBackward(grad, input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustToSparse(del bool)(retVal Tensor) { + + retVal, err := ts.ToSparse(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustToSparse1(sparseDim int64, del bool)(retVal Tensor) { + + retVal, err := ts.ToSparse1(sparseDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTotype(scalarType gotch.DType, del bool)(retVal Tensor) { + + retVal, err := ts.Totype(scalarType, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTrace(del bool)(retVal Tensor) { + + retVal, err := ts.Trace(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTranspose(dim0 int64, dim1 int64, del bool)(retVal Tensor) { + + retVal, err := ts.Transpose(dim0, dim1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTranspose_(dim0 int64, dim1 int64)() { + + err := ts.Transpose_(dim0, dim1) + if err != nil { log.Fatal(err) } + + return +} + +func MustTrapz(y Tensor, x Tensor, dim int64)(retVal Tensor) { + + retVal, err := Trapz(y, x, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTrapz1(y Tensor, dx float64, dim int64)(retVal Tensor) { + + retVal, err := Trapz1(y, dx, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTril(diagonal int64, del bool)(retVal Tensor) { + + retVal, err := ts.Tril(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTril_(diagonal int64)() { + + err := ts.Tril_(diagonal) + if err != nil { log.Fatal(err) } + + return +} + +func MustTrilIndices(row int64, col int64, offset int64, 
optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTrilOut(out Tensor, diagonal int64, del bool)(retVal Tensor) { + + retVal, err := ts.TrilOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTripletMarginLoss(anchor Tensor, positive Tensor, negative Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal Tensor) { + + retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTriu(diagonal int64, del bool)(retVal Tensor) { + + retVal, err := ts.Triu(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTriu_(diagonal int64)() { + + err := ts.Triu_(diagonal) + if err != nil { log.Fatal(err) } + + return +} + +func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTriuOut(out Tensor, diagonal int64, del bool)(retVal Tensor) { + + retVal, err := ts.TriuOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTrueDivide(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.TrueDivide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTrueDivide1(other Scalar, del bool)(retVal Tensor) { + + retVal, err := ts.TrueDivide1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTrueDivide_(other Tensor)() { + + err := ts.TrueDivide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustTrueDivide1_(other Scalar)() { + + err := ts.TrueDivide1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustTrueDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.TrueDivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTrunc(del bool)(retVal Tensor) { + + retVal, err := ts.Trunc(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTrunc_()() { + + err := ts.Trunc_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustTruncOut(out Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.TruncOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustTypeAs(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.TypeAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUnfold(dimension int64, size int64, step int64, del bool)(retVal Tensor) { + + retVal, err := ts.Unfold(dimension, size, step, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUniform_(from float64, to float64)() { + + err := ts.Uniform_(from, to) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) MustUnsqueeze(dim int64, del bool)(retVal Tensor) { + + retVal, err := ts.Unsqueeze(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUnsqueeze_(dim int64)() { + + err := ts.Unsqueeze_(dim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts Tensor) 
MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBicubic2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBicubic2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleBicubic2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleBicubic2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBilinear2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBilinear2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleBilinear2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleBilinear2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleLinear1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor) { + + retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleLinear1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor) { + + retVal, err := UpsampleLinear1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleLinear1dOut(out Tensor, outputSize 
[]int64, alignCorners bool, scales float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleNearest1d(outputSize []int64, scales float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleNearest1d(outputSize, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor) { + + retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor) { + + retVal, err := UpsampleNearest1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleNearest1dOut(out Tensor, outputSize []int64, scales float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleNearest2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleNearest2dOut(out Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleNearest3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleNearest3dOut(out Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) { 
+ + retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleTrilinear3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleTrilinear3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor) { + + retVal, err := UpsampleTrilinear3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustUpsampleTrilinear3dOut(out Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor) { + + retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustValues(del bool)(retVal Tensor) { + + retVal, err := ts.Values(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustVar(unbiased bool, del bool)(retVal Tensor) { + + retVal, err := ts.Var(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustVar1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.Var1(dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustVarOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor) { + + retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustView(size []int64, del bool)(retVal Tensor) { + + retVal, err := ts.View(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustViewAs(other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.ViewAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustWhere1(condition Tensor, other Tensor, del bool)(retVal Tensor) { + + retVal, err := ts.Where1(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustZero_()() { + + err := ts.Zero_() + if err != nil { log.Fatal(err) } + + return +} + +func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor) { + + retVal, err := Zeros(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts Tensor) MustZerosLike(del bool)(retVal Tensor) { + + retVal, err := ts.ZerosLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustZerosOut(out Tensor, size []int64)(retVal Tensor) { + + retVal, err := ZerosOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} +// End of implementing Tensor ================================= diff 
--git a/tensor/other.go b/tensor/other.go index b1845ff..1431a06 100644 --- a/tensor/other.go +++ b/tensor/other.go @@ -8,10 +8,12 @@ import ( // CrossEntropyForLogits computes the cross-entropy loss based on some logits and targets. func (ts Tensor) CrossEntropyForLogits(targets Tensor) (retVal Tensor) { - // return ts.MustLogSoftmax(-1, gotch.Float.CInt(), true).MustNllLoss(targets, true) + weight := NewTensor() + reduction := int64(1) // Mean of loss + ignoreIndex := int64(-100) - logSm := ts.MustLogSoftmax(-1, gotch.Float.CInt(), true) - return logSm.MustNllLoss(targets, true) + logSm := ts.MustLogSoftmax(-1, gotch.Float, true) + return logSm.MustNllLoss(targets, weight, reduction, ignoreIndex, true) } // AccuracyForLogits returns the average accuracy for some given logits assuming that @@ -19,11 +21,11 @@ func (ts Tensor) CrossEntropyForLogits(targets Tensor) (retVal Tensor) { func (ts Tensor) AccuracyForLogits(targets Tensor) (retVal Tensor) { argmax := ts.MustArgmax(-1, false, true) eq1 := argmax.MustEq1(targets, true) - return eq1.MustTotype(gotch.Float, true).MustMean(gotch.Float.CInt(), true) + return eq1.MustTotype(gotch.Float, true).MustMean(gotch.Float, true) } func (ts Tensor) MaxPool2DDefault(ksize int64, del bool) (retVal Tensor) { - return ts.MustMaxPool2D([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, []int64{1, 1}, false, del) + return ts.MustMaxPool2d([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, []int64{1, 1}, false, del) } // TODO: continue diff --git a/tensor/patch.go b/tensor/patch.go new file mode 100644 index 0000000..43a6d69 --- /dev/null +++ b/tensor/patch.go @@ -0,0 +1,154 @@ +package tensor + +// #include "stdlib.h" +import "C" + +import ( + "log" + "unsafe" + + // "github.com/sugarme/gotch" + lib "github.com/sugarme/gotch/libtch" +) + +// NOTE. This is a temporarily patched to make it run. +// TODO. make change at generator for []Tensor input + +func (ts Tensor) Lstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c Tensor, err error) { + + // NOTE: `atg_lstm` will create 3 consecutive Ctensors in memory of C land. The first + // Ctensor will have address given by `ctensorPtr1` here. 
+ // The next pointers can be calculated based on `ctensorPtr1` + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr1))) + ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr1))) + + var chxData []lib.Ctensor + for _, t := range hxData { + chxData = append(chxData, t.ctensor) + } + + var cparamsData []lib.Ctensor + for _, t := range paramsData { + cparamsData = append(cparamsData, t.ctensor) + } + + var chasBiases int32 = 0 + if hasBiases { + chasBiases = 1 + } + var ctrain int32 = 0 + if train { + ctrain = 1 + } + var cbidirectional int32 = 0 + if bidirectional { + cbidirectional = 1 + } + var cbatchFirst int32 = 0 + if batchFirst { + cbatchFirst = 1 + } + + lib.AtgLstm(ctensorPtr1, ts.ctensor, chxData, len(hxData), cparamsData, len(paramsData), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) + err = TorchErr() + if err != nil { + return output, h, c, err + } + + return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, Tensor{ctensor: *ctensorPtr3}, nil + +} + +func (ts Tensor) MustLstm(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c Tensor) { + output, h, c, err := ts.Lstm(hxData, paramsData, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) + + if err != nil { + log.Fatal(err) + } + + return output, h, c +} + +func (ts Tensor) Gru(hx Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h Tensor, err error) { + + // NOTE: `atg_gru` will create 2 consecutive Ctensors in memory of C land. + // The first Ctensor will have address given by `ctensorPtr1` here. + // The next pointer can be calculated based on `ctensorPtr1` + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr1))) + + var cparamsData []lib.Ctensor + for _, t := range paramsData { + cparamsData = append(cparamsData, t.ctensor) + } + + var chasBiases int32 = 0 + if hasBiases { + chasBiases = 1 + } + var ctrain int32 = 0 + if train { + ctrain = 1 + } + var cbidirectional int32 = 0 + if bidirectional { + cbidirectional = 1 + } + var cbatchFirst int32 = 0 + if batchFirst { + cbatchFirst = 1 + } + + lib.AtgGru(ctensorPtr1, ts.ctensor, hx.ctensor, cparamsData, len(paramsData), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) + err = TorchErr() + if err != nil { + return output, h, err + } + + return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, nil + +} + +func (ts Tensor) MustGru(hx Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h Tensor) { + output, h, err := ts.Gru(hx, paramsData, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) + if err != nil { + log.Fatal(err) + } + + return output, h +} + +func (ts Tensor) TopK(k int64, dim int64, largest bool, sorted bool) (ts1 Tensor, ts2 Tensor, err error) { + + // NOTE: `lib.AtgTopk` will return 2 tensors in C memory. 
First tensor pointer + // is given by ctensorPtr1 + ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr1))) + var clargest int32 = 0 + if largest { + clargest = 1 + } + var csorted int32 = 0 + if sorted { + csorted = 1 + } + + lib.AtgTopk(ctensorPtr1, ts.ctensor, k, dim, clargest, csorted) + err = TorchErr() + if err != nil { + return ts1, ts2, err + } + + return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, nil +} + +func (ts Tensor) MustTopK(k int64, dim int64, largest bool, sorted bool) (ts1 Tensor, ts2 Tensor) { + + ts1, ts2, err := ts.TopK(k, dim, largest, sorted) + if err != nil { + log.Fatal(err) + } + + return ts1, ts2 +} diff --git a/tensor/tensor-generated-sample.go b/tensor/tensor-generated-sample.go deleted file mode 100644 index d915f06..0000000 --- a/tensor/tensor-generated-sample.go +++ /dev/null @@ -1,2270 +0,0 @@ -// NOTE: this is a sample for OCaml generated code for `tensor-generated.go` -package tensor - -// #include "stdlib.h" -import "C" - -import ( - "log" - "unsafe" - - "github.com/sugarme/gotch" - lib "github.com/sugarme/gotch/libtch" -) - -func (ts Tensor) To(device gotch.Device, del bool) (retVal Tensor, err error) { - - // TODO: how to get pointer to CUDA memory??? - // C.cuMemAlloc((*C.ulonglong)(cudaPtr), 1) // 0 byte is invalid - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - if del { - defer ts.MustDrop() - } - - lib.AtgTo((*lib.Ctensor)(ptr), ts.ctensor, int(device.CInt())) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustTo(device gotch.Device, del bool) (retVal Tensor) { - var err error - retVal, err = ts.To(device, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Matmul(other Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgMatmul(ptr, ts.ctensor, other.ctensor) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustMatMul(other Tensor, del bool) (retVal Tensor) { - retVal, err := ts.Matmul(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Grad() (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgGrad(ptr, ts.ctensor) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustGrad() (retVal Tensor) { - retVal, err := ts.Grad() - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Detach_() { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgDetach_(ptr, ts.ctensor) - - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Detach() (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgDetach(ptr, ts.ctensor) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil - -} - -func (ts Tensor) MustDetach() (retVal Tensor) { - retVal, err := ts.Detach() - if err != nil { - log.Fatal(err) - } - - return retVal - -} - -func (ts Tensor) Zero_() { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgZero_(ptr, ts.ctensor) - - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) SetRequiresGrad(rb bool) (retVal Tensor, err 
error) { - var r int = 0 - if rb { - r = 1 - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSetRequiresGrad(ptr, ts.ctensor, r) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustSetRequiresGrad(rb bool) (retVal Tensor) { - retVal, err := ts.SetRequiresGrad(rb) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Mul(other Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgMul(ptr, ts.ctensor, other.ctensor) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustMul(other Tensor, del bool) (retVal Tensor) { - retVal, err := ts.Mul(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Mul1(other Scalar, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgMul1(ptr, ts.ctensor, other.cscalar) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustMul1(other Scalar, del bool) (retVal Tensor) { - retVal, err := ts.Mul1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Mul_(other Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgMul_(ptr, ts.ctensor, other.ctensor) - - if err = TorchErr(); err != nil { - return err - } - - return nil -} - -func (ts Tensor) MustMul_(other Tensor) { - err := ts.Mul_(other) - if err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Mul1_(other Scalar) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgMul1_(ptr, ts.ctensor, other.cscalar) - - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Add(other Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgAdd(ptr, ts.ctensor, other.ctensor) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustAdd(other Tensor, del bool) (retVal Tensor) { - retVal, err := ts.Add(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Add_(other Tensor) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgAdd_(ptr, ts.ctensor, other.ctensor) - - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Add1(other Scalar, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgAdd1(ptr, ts.ctensor, other.cscalar) - - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil -} - -func (ts Tensor) MustAdd1(other Scalar, del bool) (retVal Tensor) { - retVal, err := ts.Add1(other, del) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Add1_(other Scalar) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgAdd1_(ptr, ts.ctensor, other.cscalar) - - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) AddG(other Tensor) (err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgAdd(ptr, ts.ctensor, other.ctensor) - - if err = TorchErr(); err != nil { - return err - } - - ts = Tensor{ctensor: *ptr} - - return 
nil -} - -func (ts Tensor) MustAddG(other Tensor) { - err := ts.AddG(other) - if err != nil { - log.Fatal(err) - } -} - -// Totype casts type of tensor to a new tensor with specified DType -func (ts Tensor) Totype(dtype gotch.DType, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - cint, err := gotch.DType2CInt(dtype) - if err != nil { - return retVal, err - } - - lib.AtgTotype(ptr, ts.ctensor, cint) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -// Totype casts type of tensor to a new tensor with specified DType. It will -// panic if error -func (ts Tensor) MustTotype(dtype gotch.DType, del bool) (retVal Tensor) { - retVal, err := ts.Totype(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -// Unsqueeze unsqueezes tensor to specified dimension. -func (ts Tensor) Unsqueeze(dim int64, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgUnsqueeze(ptr, ts.ctensor, dim) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustUnsqueeze(dim int64, del bool) (retVal Tensor) { - retVal, err := ts.Unsqueeze(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -// Select creates a new tensor from current tensor given dim and index. -func (ts Tensor) Select(dim int64, index int64, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgSelect(ptr, ts.ctensor, dim, index) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSelect(dim int64, index int64, del bool) (retVal Tensor) { - retVal, err := ts.Select(dim, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -// Narrow creates a new tensor from current tensor given dim and start index -// and length. -func (ts Tensor) Narrow(dim int64, start int64, length int64, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgNarrow(ptr, ts.ctensor, dim, start, length) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustNarrow(dim int64, start int64, length int64, del bool) (retVal Tensor) { - retVal, err := ts.Narrow(dim, start, length, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -// IndexSelect creates a new tensor from current tensor given dim and index -// tensor. 
-func (ts Tensor) IndexSelect(dim int64, index Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} -func (ts Tensor) MustIndexSelect(dim int64, index Tensor, del bool) (retVal Tensor) { - retVal, err := ts.IndexSelect(dim, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Zeros(size []int64, optionsKind, optionsDevice int32) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgZeros(ptr, size, len(size), optionsKind, optionsDevice) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustZeros(size []int64, optionsKind, optionsDevice int32) (retVal Tensor) { - retVal, err := Zeros(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - return retVal -} - -func Ones(size []int64, optionsKind, optionsDevice int32) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgOnes(ptr, size, len(size), optionsKind, optionsDevice) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustOnes(size []int64, optionsKind, optionsDevice int32) (retVal Tensor) { - retVal, err := Ones(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - return retVal -} - -// NOTE: `_` denotes "in-place". -func (ts Tensor) Uniform_(from float64, to float64) { - var err error - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUniform_(ptr, ts.ctensor, from, to) - if err = TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) ZerosLike(del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgZerosLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) Fill_(value Scalar) { - var err error - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgFill_(ptr, ts.ctensor, value.cscalar) - - if err = TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) RandnLike(del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgRandnLike(ptr, ts.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) Permute(dims []int64, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - lib.AtgPermute(ptr, ts.ctensor, dims, len(dims)) - - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) Squeeze1(dim int64, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgSqueeze1(ptr, ts.ctensor, dim) - - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSqueeze1(dim int64, del bool) (retVal Tensor) { - var err error - retVal, err = ts.Squeeze1(dim, del) - if 
err != nil { - log.Fatal(err) - } - return retVal -} - -func (ts Tensor) Squeeze_() { - var err error - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - defer C.free(unsafe.Pointer(ptr)) - - lib.AtgSqueeze_(ptr, ts.ctensor) - - if err = TorchErr(); err != nil { - log.Fatal(err) - } -} - -func Stack(tensors []Tensor, dim int64) (retVal Tensor, err error) { - // TODO: should we implement del param to delete tensors after stacking? - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - defer C.free(unsafe.Pointer(ptr)) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - - lib.AtgStack(ptr, ctensors, len(tensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func Cat(tensors []Tensor, dim int64, del bool) (retVal Tensor, err error) { - if del { - for _, t := range tensors { - defer t.MustDrop() - } - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctensors []lib.Ctensor - for _, t := range tensors { - ctensors = append(ctensors, t.ctensor) - } - - lib.AtgCat(ptr, ctensors, len(tensors), dim) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - return retVal, nil -} - -func MustCat(tensors []Tensor, dim int64, del bool) (retVal Tensor) { - retVal, err := Cat(tensors, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Mm(mat2 Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgMm(ptr, ts.ctensor, mat2.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustMm(mat2 Tensor, del bool) (retVal Tensor) { - retVal, err := ts.Mm(mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) LogSoftmax(dim int64, dtype int32, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustLogSoftmax(dim int64, dtype int32, del bool) (retVal Tensor) { - retVal, err := ts.LogSoftmax(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) NllLoss(target Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - weight := NewTensor() - - reduction := int64(1) // Mean of loss - ignoreIndex := int64(-100) - defer C.free(unsafe.Pointer(ptr)) - - lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustNllLoss(target Tensor, del bool) (retVal Tensor) { - retVal, err := ts.NllLoss(target, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Argmax(dim int64, keepDim bool, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - var ckeepDim int = 0 - if keepDim { - ckeepDim = 1 - } - - lib.AtgArgmax(ptr, ts.ctensor, dim, ckeepDim) - if err = TorchErr(); err != nil { - return retVal, err - } - - 
retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustArgmax(dim int64, keepDim bool, del bool) (retVal Tensor) { - retVal, err := ts.Argmax(dim, keepDim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Mean(dtype int32, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgMean(ptr, ts.ctensor, dtype) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustMean(dtype int32, del bool) (retVal Tensor) { - retVal, err := ts.Mean(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Mean1(dims []int64, keepDim bool, dtype gotch.DType, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - ckeepDim := 0 - if keepDim { - ckeepDim = 1 - } - - lib.AtgMean1(ptr, ts.ctensor, dims, len(dims), ckeepDim, dtype.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustMean1(dims []int64, keepDim bool, dtype gotch.DType, del bool) (retVal Tensor) { - retVal, err := ts.Mean1(dims, keepDim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) View(sizeData []int64, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgView(ptr, ts.ctensor, sizeData, len(sizeData)) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustView(sizeData []int64, del bool) (retVal Tensor) { - retVal, err := ts.View(sizeData, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Div1(other Scalar, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgDiv1(ptr, ts.ctensor, other.cscalar) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustDiv1(other Scalar, del bool) (retVal Tensor) { - retVal, err := ts.Div1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Div(other Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgDiv(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustDiv(other Tensor, del bool) (retVal Tensor) { - retVal, err := ts.Div(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Div_(other Tensor) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgDiv_(ptr, ts.ctensor, other.ctensor) - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Div1_(other Scalar) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgDiv1_(ptr, ts.ctensor, other.cscalar) - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func Randperm(n int64, optionKind gotch.DType, optionDevice gotch.Device) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandperm(ptr, n, 
optionKind.CInt(), optionDevice.CInt()) - if err = TorchErr(); err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustRandperm(n int64, optionKind gotch.DType, optionDevice gotch.Device) (retVal Tensor) { - retVal, err := Randperm(n, optionKind, optionDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Clamp_(min Scalar, max Scalar) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar) - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Clamp(min Scalar, max Scalar, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - return retVal, nil -} - -func (ts Tensor) MustClamp(min Scalar, max Scalar, del bool) (retVal Tensor) { - - retVal, err := ts.Clamp(min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) ClampMax(max Scalar, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgClampMax(ptr, ts.ctensor, max.cscalar) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - return retVal, nil -} - -func (ts Tensor) MustClampMax(max Scalar, del bool) (retVal Tensor) { - retVal, err := ts.ClampMax(max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Relu_() { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRelu_(ptr, ts.ctensor) - if err := TorchErr(); err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Relu(del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgRelu(ptr, ts.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustRelu(del bool) (retVal Tensor) { - retVal, err := ts.Relu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) T(del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgT(ptr, ts.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustT(del bool) (retVal Tensor) { - retVal, err := ts.T(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) T_() { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - defer C.free(unsafe.Pointer(ptr)) - - lib.AtgT_(ptr, ts.ctensor) - err := TorchErr() - if err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) MseLoss(target Tensor, reduction int, del bool) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustMseLoss(target Tensor, reduction int, del bool) (retVal Tensor) { - retVal, err := ts.MseLoss(target, reduction, del) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) 
Exp(del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgExp(ptr, ts.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustExp(del bool) (retVal Tensor) { - retVal, err := ts.Exp(del) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Exp_() { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - defer C.free(unsafe.Pointer(ptr)) - - lib.AtgExp(ptr, ts.ctensor) - err := TorchErr() - if err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Pow(exponent Scalar, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgPow(ptr, ts.ctensor, exponent.cscalar) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustPow(exponent Scalar, del bool) (retVal Tensor) { - retVal, err := ts.Pow(exponent, del) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Sum(dtype int32, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgSum(ptr, ts.ctensor, dtype) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSum(dtype int32, del bool) (retVal Tensor) { - retVal, err := ts.Sum(dtype, del) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Sub(other Tensor, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgSub(ptr, ts.ctensor, other.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSub(other Tensor, del bool) (retVal Tensor) { - retVal, err := ts.Sub(other, del) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Sub1(other Scalar, del bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgSub1(ptr, ts.ctensor, other.cscalar) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSub1(other Scalar, del bool) (retVal Tensor) { - retVal, err := ts.Sub1(other, del) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Sub_(other Tensor) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgSub_(ptr, ts.ctensor, other.ctensor) - err := TorchErr() - if err != nil { - log.Fatal(err) - } -} - -func (ts Tensor) Sub1_(other Scalar) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSub1_(ptr, ts.ctensor, other.cscalar) - err := TorchErr() - if err != nil { - log.Fatal(err) - } -} - -func Conv1D(input, weight, bias Tensor, stride, padding, dilation []int64, groups int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustConv1D(input, 
weight, bias Tensor, stride, padding, dilation []int64, groups int64) (retVal Tensor) { - retVal, err := Conv1D(input, weight, bias, stride, padding, dilation, groups) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Conv2D(input, weight, bias Tensor, stride, padding, dilation []int64, groups int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustConv2D(input, weight, bias Tensor, stride, padding, dilation []int64, groups int64) (retVal Tensor) { - retVal, err := Conv2D(input, weight, bias, stride, padding, dilation, groups) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Conv3D(input, weight, bias Tensor, stride, padding, dilation []int64, groups int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustConv3D(input, weight, bias Tensor, stride, padding, dilation []int64, groups int64) (retVal Tensor) { - retVal, err := Conv3D(input, weight, bias, stride, padding, dilation, groups) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) MaxPool2D(kernel []int64, stride []int64, padding []int64, dilation []int64, ceil bool, del bool) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - var ceilMode int - switch ceil { - case true: - ceilMode = 1 - case false: - ceilMode = 0 - } - - lib.AtgMaxPool2d(ptr, ts.ctensor, kernel, len(kernel), stride, len(stride), padding, len(padding), dilation, len(dilation), ceilMode) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustMaxPool2D(kernel []int64, stride []int64, padding []int64, dilation []int64, ceil bool, del bool) (retVal Tensor) { - retVal, err := ts.MaxPool2D(kernel, stride, padding, dilation, ceil, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) AvgPool2D(kernel []int64, stride []int64, padding []int64, ceil bool, countIncludePad bool, divisorOverride int64, del bool) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - var ceilMode int - switch ceil { - case true: - ceilMode = 1 - case false: - ceilMode = 0 - } - - var countIncludePadMode int = 0 - if countIncludePad { - countIncludePadMode = 1 - } - - lib.AtgAvgPool2d(ptr, ts.ctensor, kernel, len(kernel), stride, len(stride), padding, len(padding), ceilMode, countIncludePadMode, divisorOverride) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustAvgPool2D(kernel []int64, stride []int64, padding []int64, ceil bool, countIncludePad bool, divisorOverride int64, del bool) (retVal Tensor) { - retVal, err := ts.AvgPool2D(kernel, stride, padding, ceil, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return 
retVal -} - -func Dropout(input Tensor, p float64, train bool) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctrain int - switch train { - case true: - ctrain = 1 - case false: - ctrain = 0 - } - - lib.AtgDropout(ptr, input.ctensor, p, ctrain) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil - -} - -func MustDropout(input Tensor, p float64, train bool) (retVal Tensor) { - retVal, err := Dropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Dropout(p float64, train bool, del bool) (retVal Tensor, err error) { - - if del { - defer ts.MustDrop() - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctrain int - switch train { - case true: - ctrain = 1 - case false: - ctrain = 0 - } - - lib.AtgDropout(ptr, ts.ctensor, p, ctrain) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil - -} - -func (ts Tensor) MustDropout(p float64, train bool, del bool) (retVal Tensor) { - retVal, err := ts.Dropout(p, train, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Dropout_(p float64, train bool) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - var ctrain int - switch train { - case true: - ctrain = 1 - case false: - ctrain = 0 - } - lib.AtgDropout_(ptr, ts.ctensor, p, ctrain) - err := TorchErr() - if err != nil { - log.Fatal(err) - } -} - -func ConvTranspose1D(input, weight, bias Tensor, stride, padding, outputPadding, dilation []int64, groups int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation), groups) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustConvTranspose1D(input, weight, bias Tensor, stride, padding, outputPadding, dilation []int64, groups int64) (retVal Tensor) { - retVal, err := ConvTranspose1D(input, weight, bias, stride, padding, outputPadding, dilation, groups) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func ConvTranspose2D(input, weight, bias Tensor, stride, padding, outputPadding, dilation []int64, groups int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation), groups) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustConvTranspose2D(input, weight, bias Tensor, stride, padding, outputPadding, dilation []int64, groups int64) (retVal Tensor) { - retVal, err := ConvTranspose2D(input, weight, bias, stride, padding, outputPadding, dilation, groups) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func ConvTranspose3D(input, weight, bias Tensor, stride, padding, outputPadding, dilation []int64, groups int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation), 
groups) - - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustConvTranspose3D(input, weight, bias Tensor, stride, padding, outputPadding, dilation []int64, groups int64) (retVal Tensor) { - retVal, err := ConvTranspose3D(input, weight, bias, stride, padding, outputPadding, dilation, groups) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) LSTM(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c Tensor, err error) { - - // NOTE: `atg_lstm` will create 3 consecutive Ctensors in memory of C land. The first - // Ctensor will have address given by `ctensorPtr1` here. - // The next pointers can be calculated based on `ctensorPtr1` - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr1))) - ctensorPtr3 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr2)) + unsafe.Sizeof(ctensorPtr1))) - - var chxData []lib.Ctensor - for _, t := range hxData { - chxData = append(chxData, t.ctensor) - } - - var cparamsData []lib.Ctensor - for _, t := range paramsData { - cparamsData = append(cparamsData, t.ctensor) - } - - chasBiases := 0 - if hasBiases { - chasBiases = 1 - } - ctrain := 0 - if train { - ctrain = 1 - } - cbidirectional := 0 - if bidirectional { - cbidirectional = 1 - } - cbatchFirst := 0 - if batchFirst { - cbatchFirst = 1 - } - - lib.AtgLstm(ctensorPtr1, ts.ctensor, chxData, len(hxData), cparamsData, len(paramsData), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) - err = TorchErr() - if err != nil { - return output, h, c, err - } - - return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, Tensor{ctensor: *ctensorPtr3}, nil - -} - -func (ts Tensor) MustLSTM(hxData []Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h, c Tensor) { - output, h, c, err := ts.LSTM(hxData, paramsData, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) - - if err != nil { - log.Fatal(err) - } - - return output, h, c -} - -func (ts Tensor) GRU(hx Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h Tensor, err error) { - - // NOTE: `atg_gru` will create 2 consecutive Ctensors in memory of C land. - // The first Ctensor will have address given by `ctensorPtr1` here. 
- // The next pointer can be calculated based on `ctensorPtr1` - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr1))) - - var cparamsData []lib.Ctensor - for _, t := range paramsData { - cparamsData = append(cparamsData, t.ctensor) - } - - chasBiases := 0 - if hasBiases { - chasBiases = 1 - } - ctrain := 0 - if train { - ctrain = 1 - } - cbidirectional := 0 - if bidirectional { - cbidirectional = 1 - } - cbatchFirst := 0 - if batchFirst { - cbatchFirst = 1 - } - - lib.AtgGru(ctensorPtr1, ts.ctensor, hx.ctensor, cparamsData, len(paramsData), chasBiases, numLayers, dropout, ctrain, cbidirectional, cbatchFirst) - err = TorchErr() - if err != nil { - return output, h, err - } - - return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, nil - -} - -func (ts Tensor) MustGRU(hx Tensor, paramsData []Tensor, hasBiases bool, numLayers int64, dropout float64, train bool, bidirectional bool, batchFirst bool) (output, h Tensor) { - output, h, err := ts.GRU(hx, paramsData, hasBiases, numLayers, dropout, train, bidirectional, batchFirst) - if err != nil { - log.Fatal(err) - } - - return output, h -} - -func Randn(sizeData []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandn(ptr, sizeData, len(sizeData), optionsKind.CInt(), optionsDevice.CInt()) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustRandn(sizeData []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal Tensor) { - - retVal, err := Randn(sizeData, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Embedding(weight, indices Tensor, paddingIdx int64, scaleGradByFreq, sparse bool) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - cscaleGradByFreq := 0 - if scaleGradByFreq { - cscaleGradByFreq = 1 - } - - csparse := 0 - if sparse { - csparse = 1 - } - - lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustEmbedding(weight, indices Tensor, paddingIdx int64, scaleGradByFreq, sparse bool) (retVal Tensor) { - - retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Randint(high int64, sizeData []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgRandint(ptr, high, sizeData, len(sizeData), optionsKind.CInt(), optionsDevice.CInt()) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustRandint(high int64, sizeData []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal Tensor) { - - retVal, err := Randint(high, sizeData, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func LayerNorm(input Tensor, normalizedShape []int64, weight, bias Tensor, eps float64, cudnnEnable bool) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ccudnnEnable := 0 - if cudnnEnable { - ccudnnEnable = 1 - } - - lib.AtgLayerNorm(ptr, 
input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps, ccudnnEnable) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustLayerNorm(input Tensor, normalizedShape []int64, weight, bias Tensor, eps float64, cudnnEnable bool) (retVal Tensor) { - - retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func BatchNorm(input Tensor, weight, bias, runningMean, runningVar Tensor, train bool, momentum float64, eps float64, cudnnEnable bool) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ccudnnEnable := 0 - if cudnnEnable { - ccudnnEnable = 1 - } - ctrain := 0 - if train { - ctrain = 1 - } - - lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctrain, momentum, eps, ccudnnEnable) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil - -} - -func MustBatchNorm(input Tensor, weight, bias, runningMean, runningVar Tensor, train bool, momentum float64, eps float64, cudnnEnable bool) (retVal Tensor) { - retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, train, momentum, eps, cudnnEnable) - - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) TopK(k int64, dim int64, largest bool, sorted bool) (ts1 Tensor, ts2 Tensor, err error) { - - // NOTE: `lib.AtgTopk` will return 2 tensors in C memory. First tensor pointer - // is given by ctensorPtr1 - ctensorPtr1 := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ctensorPtr2 := (*lib.Ctensor)(unsafe.Pointer(uintptr(unsafe.Pointer(ctensorPtr1)) + unsafe.Sizeof(ctensorPtr1))) - clargest := 0 - if largest { - clargest = 1 - } - csorted := 0 - if sorted { - csorted = 1 - } - - lib.AtgTopk(ctensorPtr1, ts.ctensor, k, dim, clargest, csorted) - err = TorchErr() - if err != nil { - return ts1, ts2, err - } - - return Tensor{ctensor: *ctensorPtr1}, Tensor{ctensor: *ctensorPtr2}, nil -} - -func (ts Tensor) MustTopK(k int64, dim int64, largest bool, sorted bool) (ts1 Tensor, ts2 Tensor) { - - ts1, ts2, err := ts.TopK(k, dim, largest, sorted) - if err != nil { - log.Fatal(err) - } - - return ts1, ts2 -} - -func (ts Tensor) AdaptiveAvgPool2D(outputSizeData []int64) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSizeData, len(outputSizeData)) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustAdaptiveAvgPool2D(outputSizeData []int64) (retVal Tensor) { - retVal, err := ts.AdaptiveAvgPool2D(outputSizeData) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Softmax(dim int64, dtype gotch.DType, del bool) (retVal Tensor, err error) { - - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool) (retVal Tensor) { - retVal, err := ts.Softmax(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) ConstantPadNd(padData []int64, del bool) (retVal Tensor, err 
error) { - if del { - defer ts.MustDrop() - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgConstantPadNd(ptr, ts.ctensor, padData, len(padData)) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustConstantPadNd(padData []int64, del bool) (retVal Tensor) { - retVal, err := ts.ConstantPadNd(padData, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Sigmoid(del bool) (retVal Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSigmoid(ptr, ts.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSigmoid(del bool) (retVal Tensor) { - retVal, err := ts.Sigmoid(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Flip(dims []int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgFlip(ptr, ts.ctensor, dims, len(dims)) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil - -} - -func (ts Tensor) MustFlip(dims []int64) (retVal Tensor) { - retVal, err := ts.Flip(dims) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) ReflectionPad2d(paddingData []int64) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgReflectionPad2d(ptr, ts.ctensor, paddingData, len(paddingData)) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil - -} - -func (ts Tensor) MustReflectionPad2d(paddingData []int64) (retVal Tensor) { - retVal, err := ts.ReflectionPad2d(paddingData) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Arange(end Scalar, kind gotch.DType, device gotch.Device) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArange(ptr, end.cscalar, kind.CInt(), device.CInt()) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil - -} - -func MustArange(end Scalar, kind gotch.DType, device gotch.Device) (retVal Tensor) { - - retVal, err := Arange(end, kind, device) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Arange1(start Scalar, end Scalar, kind gotch.DType, device gotch.Device) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArange1(ptr, start.cscalar, end.cscalar, kind.CInt(), device.CInt()) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil - -} - -func MustArange1(start, end Scalar, kind gotch.DType, device gotch.Device) (retVal Tensor) { - - retVal, err := Arange1(start, end, kind, device) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Arange2(start Scalar, end Scalar, step Scalar, kind gotch.DType, device gotch.Device) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArange2(ptr, start.cscalar, end.cscalar, step.cscalar, kind.CInt(), device.CInt()) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustArange2(start Scalar, end Scalar, step Scalar, kind gotch.DType, device gotch.Device) (retVal Tensor) { - - retVal, err := 
Arange2(start, end, step, kind, device) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func ArangeOut(out Tensor, end Scalar) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeOut(ptr, out.ctensor, end.cscalar) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustArangeOut(out Tensor, end Scalar) (retVal Tensor) { - retVal, err := ArangeOut(out, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func ArangeOut1(out Tensor, start, end Scalar) (retVal Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgArangeOut1(ptr, out.ctensor, start.cscalar, end.cscalar) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func MustArangeOut1(out Tensor, start, end Scalar) (retVal Tensor) { - retVal, err := ArangeOut1(out, start, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Max1(other Tensor, del bool) (retVal Tensor, err error) { - if del { - defer ts.MustDrop() - } - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgMax1(ptr, ts.ctensor, other.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustMax1(other Tensor, del bool) (retVal Tensor) { - - retVal, err := ts.Max1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) UpsampleNearest2d(outputSize []int64, scalesH, scalesW float64) (retVal Tensor, err error) { - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH, scalesW float64) (retVal Tensor) { - - retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Repeat(repeatData []int64, del bool) (retVal Tensor, err error) { - if del { - defer ts.MustDrop() - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgRepeat(ptr, ts.ctensor, repeatData, len(repeatData)) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustRepeat(repeatData []int64, del bool) (retVal Tensor) { - retVal, err := ts.Repeat(repeatData, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Contiguous(del bool) (retVal Tensor, err error) { - if del { - defer ts.MustDrop() - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgContiguous(ptr, ts.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustContiguous(del bool) (retVal Tensor) { - - retVal, err := ts.Contiguous(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Transpose(dim0, dim1 int64, del bool) (retVal Tensor, err error) { - if del { - defer ts.MustDrop() - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return 
retVal, nil -} - -func (ts Tensor) MustTranspose(dim0, dim1 int64, del bool) (retVal Tensor) { - retVal, err := ts.Transpose(dim0, dim1, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts Tensor) Squeeze(del bool) (retVal Tensor, err error) { - if del { - defer ts.MustDrop() - } - - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSqueeze(ptr, ts.ctensor) - err = TorchErr() - if err != nil { - return retVal, err - } - - retVal = Tensor{ctensor: *ptr} - - return retVal, nil -} - -func (ts Tensor) MustSqueeze(del bool) (retVal Tensor) { - retVal, err := ts.Squeeze(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} diff --git a/tensor/tensor-generated.go b/tensor/tensor-generated.go new file mode 100644 index 0000000..f8598c0 --- /dev/null +++ b/tensor/tensor-generated.go @@ -0,0 +1,13035 @@ +package tensor + +// NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! + +// #include "stdlib.h" +import "C" + +import( + "unsafe" + + "github.com/sugarme/gotch" + lib "github.com/sugarme/gotch/libtch" +) + + +func(ts Tensor) __And_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__And_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __And1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__And1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Iand_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Iand_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Iand1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Iand1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Ilshift_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ilshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Ilshift1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ilshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Ior_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ior_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Ior1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ior1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Irshift_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Irshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Irshift1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Irshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Ixor_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ixor_(ptr, ts.ctensor, other.cscalar) + if err = 
TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Ixor1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Ixor1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Lshift_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Lshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Lshift1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Lshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Or_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Or_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Or1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Or1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Rshift_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Rshift_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Rshift1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Rshift1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Xor_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Xor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) __Xor1(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg__Xor1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _AdaptiveAvgPool2dBackward(gradOutput Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AdaptiveAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Addr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Addr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Addr_(vec1 Tensor, vec2 Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Addr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _AddrOut(out Tensor, vec1 Tensor, vec2 
Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _AmpUpdateScale(growthTracker Tensor, currentScale Tensor, foundInf Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AmpUpdateScale(ptr, growthTracker.ctensor, currentScale.ctensor, foundInf.ctensor, scaleGrowthFactor, scaleBackoffFactor, growthInterval) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _BaddbmmMkl_(batch1 Tensor, batch2 Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_BaddbmmMkl_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _CastByte(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastByte(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CastChar(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastChar(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CastDouble(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastDouble(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CastFloat(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastFloat(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CastHalf(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastHalf(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CastInt(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastInt(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CastLong(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastLong(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CastShort(nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CastShort(ptr, ts.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Cat(tensors []Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.Atg_Cat(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.Atg_CatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CdistBackward(grad Tensor, x1 Tensor, x2 Tensor, p float64, cdist Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CdistBackward(ptr, grad.ctensor, x1.ctensor, x2.ctensor, p, cdist.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CholeskyHelper(upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.Atg_CholeskyHelper(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CholeskySolveHelper(a Tensor, upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.Atg_CholeskySolveHelper(ptr, ts.ctensor, a.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Coalesced_(coalesced bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccoalesced := int32(0) + if coalesced { ccoalesced = int32(1) } +lib.Atg_Coalesced_(ptr, ts.ctensor, ccoalesced) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func _Convolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) 
} +cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } +lib.Atg_Convolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ConvolutionNogroup(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) } +lib.Atg_ConvolutionNogroup(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CopyFrom(dst Tensor, nonBlocking bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.Atg_CopyFrom(ptr, ts.ctensor, dst.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CtcLossBackward(grad Tensor, logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood Tensor, logAlpha Tensor, blank int64, zeroInfinity bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } +lib.Atg_CtcLossBackward(ptr, grad.ctensor, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), negLogLikelihood.ctensor, logAlpha.ctensor, blank, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.Atg_CudnnInitDropoutState(ptr, dropout, ctrain, dropoutSeed, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cweightArr []lib.Ctensor + for _, t := range weightArr {cweightArr = append(cweightArr, t.ctensor)} +cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } +cbidirectional := int32(0) + if bidirectional { cbidirectional = int32(1) } +lib.Atg_CudnnRnnFlattenWeight(ptr, cweightArr, len(cweightArr), weightStride0, inputSize, mode, hiddenSize, numLayers, cbatchFirst, cbidirectional) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Cumprod(dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Cumprod(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CumprodOut(out Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CumprodOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Cumsum(dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Cumsum(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _CumsumOut(out Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_CumsumOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _DimArange(like Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_DimArange(ptr, like.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _DirichletGrad(x Tensor, alpha Tensor, total Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_DirichletGrad(ptr, x.ctensor, alpha.ctensor, total.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } +lib.Atg_EmbeddingBagBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, csparse, perSampleWeights.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagDenseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, maximumIndices Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +lib.Atg_EmbeddingBagDenseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, maximumIndices.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func 
_EmbeddingBagPerSampleWeightsBackward(grad Tensor, weight Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, mode int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmbeddingBagPerSampleWeightsBackward(ptr, grad.ctensor, weight.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, mode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmbeddingBagSparseBackward(grad Tensor, indices Tensor, offsets Tensor, offset2bag Tensor, bagSize Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +lib.Atg_EmbeddingBagSparseBackward(ptr, grad.ctensor, indices.ctensor, offsets.ctensor, offset2bag.ctensor, bagSize.ctensor, numWeights, cscaleGradByFreq, mode, perSampleWeights.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmptyAffineQuantized(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt(), scale, zeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _EmptyPerChannelAffineQuantized(size []int64, scales Tensor, zeroPoints Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EmptyPerChannelAffineQuantized(ptr, size, len(size), scales.ctensor, zeroPoints.ctensor, axis, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccomplexInput := int32(0) + if complexInput { ccomplexInput = int32(1) } +ccomplexOutput := int32(0) + if complexOutput { ccomplexOutput = int32(1) } +cinverse := int32(0) + if inverse { cinverse = int32(1) } +cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +conesided := int32(0) + if onesided { conesided = int32(1) } +lib.Atg_FftWithSize(ptr, ts.ctensor, signalNdim, ccomplexInput, ccomplexOutput, cinverse, checkedSignalSizes, len(checkedSignalSizes), cnormalized, conesided, outputSizes, len(outputSizes)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _GatherSparseBackward(dim int64, index Tensor, grad Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_GatherSparseBackward(ptr, ts.ctensor, dim, index.ctensor, grad.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _IndexCopy_(dim int64, index Tensor, source Tensor)(err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_IndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _IndexPutImpl_(indices []Tensor, values Tensor, accumulate bool, unsafety bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices {cindices = append(cindices, t.ctensor)} +caccumulate := int32(0) + if accumulate { caccumulate = int32(1) } +cunsafety := int32(0) + if unsafety { cunsafety = int32(1) } +lib.Atg_IndexPutImpl_(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate, cunsafety) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _Indices(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Indices(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _InverseHelper(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_InverseHelper(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } +lib.Atg_LogSoftmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _LogSoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _LuSolveHelper(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LuSolveHelper(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _MakePerChannelQuantizedTensor(scale Tensor, zeroPoint Tensor, axis int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MakePerChannelQuantizedTensor(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MakePerTensorQuantizedTensor(ptr, ts.ctensor, scale, zeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _MaskedScale(mask Tensor, scale float64, del bool)(retVal Tensor, 
err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MaskedScale(ptr, ts.ctensor, mask.ctensor, scale) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _MkldnnReshape(shape []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnReshape(ptr, ts.ctensor, shape, len(shape)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnTranspose(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _MkldnnTranspose_(dim0 int64, dim1 int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MkldnnTranspose_(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func _MultinomialAliasDraw(j Tensor, q Tensor, numSamples int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_MultinomialAliasDraw(ptr, j.ctensor, q.ctensor, numSamples) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _NnpackSpatialConvolution(input Tensor, weight Tensor, bias Tensor, padding []int64, stride []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NnpackSpatialConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _NnpackSpatialConvolutionBackwardInput(input Tensor, gradOutput Tensor, weight Tensor, padding []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NnpackSpatialConvolutionBackwardInput(ptr, input.ctensor, gradOutput.ctensor, weight.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _NnpackSpatialConvolutionBackwardWeight(input Tensor, weightsize []int64, gradOutput Tensor, padding []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_NnpackSpatialConvolutionBackwardWeight(ptr, input.ctensor, weightsize, len(weightsize), gradOutput.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _PackPaddedSequenceBackward(grad Tensor, inputSize []int64, batchSizes Tensor, batchFirst bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbatchFirst := int32(0) + if batchFirst { cbatchFirst = int32(1) } +lib.Atg_PackPaddedSequenceBackward(ptr, grad.ctensor, inputSize, len(inputSize), batchSizes.ctensor, cbatchFirst) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _PdistBackward(grad Tensor, p float64, pdist Tensor, del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_PdistBackward(ptr, grad.ctensor, ts.ctensor, p, pdist.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _ReshapeFromTensor(shape Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ReshapeFromTensor(ptr, ts.ctensor, shape.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SWhere(condition Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SWhere(ptr, condition.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SampleDirichlet(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SampleDirichlet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _ShapeAsTensor(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ShapeAsTensor(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SobolEngineFf_(n int64, sobolstate Tensor, dimension int64, numGenerated int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineFf_(ptr, ts.ctensor, n, sobolstate.ctensor, dimension, numGenerated) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _SobolEngineInitializeState_(dimension int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineInitializeState_(ptr, ts.ctensor, dimension) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _SobolEngineScramble_(ltm Tensor, dimension int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SobolEngineScramble_(ptr, ts.ctensor, ltm.ctensor, dimension) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) _Softmax(dim int64, halfToFloat bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { chalfToFloat = int32(1) } +lib.Atg_Softmax(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SoftmaxBackwardData(gradOutput Tensor, output Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SparseAddmm(sparse Tensor, dense Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.Atg_SparseAddmm(ptr, ts.ctensor, sparse.ctensor, dense.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorUnsafe(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorUnsafe(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorWithDims(ptr, sparseDim, denseDim, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseCooTensorWithDimsAndTensors(ptr, sparseDim, denseDim, size, len(size), indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _SparseMm(sparse Tensor, dense Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseMm(ptr, sparse.ctensor, dense.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SparseSum(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SparseSum1(dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum1(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SparseSum2(dim []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum2(ptr, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SparseSum3(dim []int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSum3(ptr, ts.ctensor, dim, len(dim), dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _SparseSumBackward(grad Tensor, dim []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSumBackward(ptr, 
grad.ctensor, ts.ctensor, dim, len(dim)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _StandardGamma(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_StandardGamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _StandardGammaGrad(output Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_StandardGammaGrad(ptr, ts.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Std(unbiased bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +lib.Atg_Std(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _Trilinear(i1 Tensor, i2 Tensor, i3 Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Trilinear(ptr, i1.ctensor, i2.ctensor, i3.ctensor, expand1, len(expand1), expand2, len(expand2), expand3, len(expand3), sumdim, len(sumdim), unrollDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _UnsafeView(size []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_UnsafeView(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Values(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Values(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) _Var(unbiased bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +lib.Atg_Var(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func _WeightNorm(v Tensor, g Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_WeightNorm(ptr, v.ctensor, g.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Abs(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbs(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Abs_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbs_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + 
return err +} + +func(ts Tensor) AbsOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Acos(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcos(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Acos_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AcosOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool1d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveAvgPool2dOut(out Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveAvgPool3d(outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveAvgPool3dBackward(gradOutput Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveAvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveAvgPool3dOut(out 
Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveAvgPool3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveMaxPool2dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveMaxPool3dBackward(gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AdaptiveMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdaptiveMaxPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Add(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Add1(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Add_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Add1_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAdd1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AddOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func(ts Tensor) Addbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addbmm_(batch1 Tensor, batch2 Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addcdiv(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdiv(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addcdiv_(tensor1 Tensor, tensor2 Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdiv_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AddcdivOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcdivOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addcmul(tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmul(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addcmul_(tensor1 Tensor, tensor2 Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmul_(ptr, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AddcmulOut(out Tensor, tensor1 Tensor, tensor2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddcmulOut(ptr, out.ctensor, ts.ctensor, tensor1.ctensor, tensor2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addmm(mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addmm_(mat1 Tensor, mat2 Tensor)(err error) { + ptr 
:= (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmm_(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addmv(mat Tensor, vec Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmv(ptr, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addmv_(mat Tensor, vec Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmv_(ptr, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AddmvOut(out Tensor, mat Tensor, vec Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddmvOut(ptr, out.ctensor, ts.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addr(vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Addr_(vec1 Tensor, vec2 Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AddrOut(out Tensor, vec1 Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func AffineGridGenerator(theta Tensor, size []int64, alignCorners bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgAffineGridGenerator(ptr, theta.ctensor, size, len(size), calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func AffineGridGeneratorBackward(grad Tensor, size []int64, alignCorners bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgAffineGridGeneratorBackward(ptr, grad.ctensor, size, len(size), calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Alias(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAlias(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AlignAs(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAlignAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) All(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAll(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) All1(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgAll1(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AllOut(out Tensor, dim int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgAllOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func AlphaDropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgAlphaDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AlphaDropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgAlphaDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Angle(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAngle(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AngleOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAngleOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Any(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAny(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Any1(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgAny1(ptr, ts.ctensor, dim, ckeepdim) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AnyOut(out Tensor, dim int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgAnyOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Arange(end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArange(ptr, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Arange1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArange1(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Arange2(start Scalar, end Scalar, step Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArange2(ptr, start.cscalar, end.cscalar, step.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ArangeOut(out Tensor, end Scalar)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeOut(ptr, out.ctensor, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ArangeOut1(out Tensor, start Scalar, end Scalar)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArangeOut1(ptr, out.ctensor, start.cscalar, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Argmax(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgArgmax(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Argmin(dim int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgArgmin(ptr, ts.ctensor, dim, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Argsort(dim int64, descending bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cdescending := int32(0) + if descending { cdescending = int32(1) } +lib.AtgArgsort(ptr, ts.ctensor, dim, cdescending) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + 
+func(ts Tensor) AsStrided(size []int64, stride []int64, storageOffset int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AsStrided_(size []int64, stride []int64, storageOffset int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Asin(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Asin_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AsinOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Atan(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Atan2(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Atan2_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Atan2Out(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Atan_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) AtanOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool2dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool2dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool3dBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) AvgPool3dOut(out Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +ccountIncludePad := int32(0) + if countIncludePad { ccountIncludePad = int32(1) } +lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Baddbmm(batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmm(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Baddbmm_(batch1 Tensor, batch2 Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBaddbmm_(ptr, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BaddbmmOut(out Tensor, batch1 Tensor, batch2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgBaddbmmOut(ptr, out.ctensor, ts.ctensor, batch1.ctensor, batch2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBartlettWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } +lib.AtgBartlettWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } +lib.AtgBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, ctraining, momentum, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormBackwardElemt(gradOut Tensor, input Tensor, mean Tensor, invstd Tensor, weight Tensor, meanDy Tensor, meanDyXmu Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormBackwardElemt(ptr, gradOut.ctensor, input.ctensor, mean.ctensor, invstd.ctensor, weight.ctensor, meanDy.ctensor, meanDyXmu.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormElemt(input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormElemt(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BatchNormElemtOut(out Tensor, input Tensor, weight Tensor, bias Tensor, mean Tensor, invstd Tensor, eps float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBatchNormElemtOut(ptr, out.ctensor, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, invstd.ctensor, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Bernoulli(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Bernoulli1(p float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgBernoulli1(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Bernoulli_(p Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli_(ptr, ts.ctensor, p.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Bernoulli1_(p float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulli1_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BernoulliOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBernoulliOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Bilinear(input1 Tensor, input2 Tensor, weight Tensor, bias Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBilinear(ptr, input1.ctensor, input2.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BinaryCrossEntropy(target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropy(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BinaryCrossEntropyBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BinaryCrossEntropyBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BinaryCrossEntropyOut(out Tensor, target Tensor, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BinaryCrossEntropyWithLogits(target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyWithLogits(ptr, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) + if err = TorchErr(); err != 
nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BinaryCrossEntropyWithLogitsBackward(gradOutput Tensor, target Tensor, weight Tensor, posWeight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinaryCrossEntropyWithLogitsBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, posWeight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Bincount(weights Tensor, minlength int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBincount(ptr, ts.ctensor, weights.ctensor, minlength) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseAnd(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseAnd1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseAnd_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BitwiseAnd1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAnd1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BitwiseAndOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseAndOut1(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseAndOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseNot(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNot(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseNot_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNot_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BitwiseNotOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseNotOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseOr(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseOr1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseOr_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BitwiseOr1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOr1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BitwiseOrOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseOrOut1(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseOrOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseXor(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseXor1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseXor_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BitwiseXor1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXor1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) BitwiseXorOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != 
nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BitwiseXorOut1(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBitwiseXorOut1(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBlackmanWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func BlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } +lib.AtgBlackmanWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Bmm(mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBmm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) BmmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CartesianProd(tensors []Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.AtgCartesianProd(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Cat(tensors []Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.AtgCat(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CatOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.AtgCatOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cauchy_(median float64, sigma float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCauchy_(ptr, ts.ctensor, median, sigma) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Cdist(x1 Tensor, x2 Tensor, p float64, computeMode int64)(retVal Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, computeMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ceil(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeil(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ceil_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeil_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) CeilOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCeilOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Celu(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Celu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func ChainMatmul(matrices []Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cmatrices []lib.Ctensor + for _, t := range matrices {cmatrices = append(cmatrices, t.ctensor)} +lib.AtgChainMatmul(ptr, cmatrices, len(cmatrices)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cholesky(upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.AtgCholesky(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CholeskyInverse(upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.AtgCholeskyInverse(ptr, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CholeskyInverseOut(out Tensor, upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.AtgCholeskyInverseOut(ptr, out.ctensor, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CholeskyOut(out Tensor, upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.AtgCholeskyOut(ptr, out.ctensor, ts.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func(ts Tensor) CholeskySolve(input2 Tensor, upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.AtgCholeskySolve(ptr, ts.ctensor, input2.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CholeskySolveOut(out Tensor, input2 Tensor, upper bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cupper := int32(0) + if upper { cupper = int32(1) } +lib.AtgCholeskySolveOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, cupper) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Clamp(min Scalar, max Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClamp(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Clamp_(min Scalar, max Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClamp_(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ClampMax(max Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMax(ptr, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ClampMax_(max Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMax_(ptr, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ClampMaxOut(out Tensor, max Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMaxOut(ptr, out.ctensor, ts.ctensor, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ClampMin(min Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMin(ptr, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ClampMin_(min Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMin_(ptr, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ClampMinOut(out Tensor, min Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampMinOut(ptr, out.ctensor, ts.ctensor, min.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ClampOut(out Tensor, min Scalar, max Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClampOut(ptr, 
out.ctensor, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Coalesce(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCoalesce(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Col2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2im(ptr, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Col2imBackward(gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imBackward(ptr, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Col2imBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Col2imOut(out Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCol2imOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Combinations(r int64, withReplacement bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cwithReplacement := int32(0) + if withReplacement { cwithReplacement = int32(1) } +lib.AtgCombinations(ptr, ts.ctensor, r, cwithReplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Conj(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConj(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ConjOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConjOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal 
= Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ConstantPadNd(pad []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConstantPadNd(ptr, ts.ctensor, pad, len(pad)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Contiguous(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgContiguous(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Conv3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConv3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ConvTbc(weight Tensor, bias Tensor, pad int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTbc(ptr, ts.ctensor, weight.ctensor, bias.ctensor, pad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvTranspose1d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose1d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvTranspose2d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose2d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} 
+ + return retVal, err +} + +func ConvTranspose3d(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgConvTranspose3d(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), groups, dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Convolution(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) } +lib.AtgConvolution(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ConvolutionOverrideable(input Tensor, weight Tensor, bias Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { ctransposed = int32(1) } +lib.AtgConvolutionOverrideable(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CopySparseToSparse_(src Tensor, nonBlocking bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +lib.AtgCopySparseToSparse_(ptr, ts.ctensor, src.ctensor, cnonBlocking) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Cos(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCos(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cos_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) CosOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cosh(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cosh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) 
CoshOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCoshOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CosineEmbeddingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosineEmbeddingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CosineSimilarity(x1 Tensor, x2 Tensor, dim int64, eps float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCosineSimilarity(ptr, x1.ctensor, x2.ctensor, dim, eps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cross(other Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCross(ptr, ts.ctensor, other.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CrossOut(out Tensor, other Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CtcLoss(logProbs Tensor, targets Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } +lib.AtgCtcLoss(ptr, logProbs.ctensor, targets.ctensor, inputLengths, len(inputLengths), targetLengths, len(targetLengths), blank, reduction, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CtcLoss1(logProbs Tensor, targets Tensor, inputLengths Tensor, targetLengths Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + czeroInfinity := int32(0) + if zeroInfinity { czeroInfinity = int32(1) } +lib.AtgCtcLoss1(ptr, logProbs.ctensor, targets.ctensor, inputLengths.ctensor, targetLengths.ctensor, blank, reduction, czeroInfinity) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnAffineGridGenerator(theta Tensor, n int64, c int64, h int64, w int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnAffineGridGenerator(ptr, theta.ctensor, n, c, h, w) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnAffineGridGeneratorBackward(grad Tensor, n int64, c int64, h int64, w int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnAffineGridGeneratorBackward(ptr, grad.ctensor, n, c, h, w) + if err = TorchErr(); err != nil { + 
return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CudnnConvolution(weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolution(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CudnnConvolution1(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolution1(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CudnnConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CudnnConvolutionTranspose(weight Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolutionTranspose(ptr, ts.ctensor, 
weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CudnnConvolutionTranspose1(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolutionTranspose1(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgCudnnConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CudnnGridSampler(grid Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCudnnGridSampler(ptr, ts.ctensor, grid.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cumprod(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprod(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CumprodOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprodOut(ptr, out.ctensor, 
ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Cumsum(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumsum(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) CumsumOut(out Tensor, dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumsumOut(ptr, out.ctensor, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Data(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgData(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Dequantize(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDequantize(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Det(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Detach(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDetach(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Detach_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDetach_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Diag(diagonal int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiag(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagEmbed(ptr, ts.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) DiagOut(out Tensor, diagonal int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagOut(ptr, out.ctensor, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Diagflat(offset int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagflat(ptr, 
ts.ctensor, offset) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagonal(ptr, ts.ctensor, offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Digamma(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Digamma_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigamma_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) DigammaOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDigammaOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Dist(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDist(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Div(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Div1(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Div_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Div1_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiv1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) DivOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Dot(tensor Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDot(ptr, ts.ctensor, tensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) DotOut(out Tensor, tensor Tensor, del bool)(retVal Tensor, err 
error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDotOut(ptr, out.ctensor, ts.ctensor, tensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Dropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Dropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Einsum(equation string, tensors []Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.AtgEinsum(ptr, equation, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Elu(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgElu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Elu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgElu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func EluBackward(gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEluBackward(ptr, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func EluBackwardOut(gradInput Tensor, gradOutput Tensor, alpha Scalar, scale Scalar, inputScale Scalar, output Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEluBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, alpha.cscalar, scale.cscalar, inputScale.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) EluOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Embedding(weight Tensor, indices Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } +lib.AtgEmbedding(ptr, weight.ctensor, indices.ctensor, paddingIdx, cscaleGradByFreq, csparse) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func EmbeddingBackward(grad Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +csparse := int32(0) + if sparse { csparse = int32(1) } +lib.AtgEmbeddingBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq, csparse) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmbeddingDenseBackward(gradOutput Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +lib.AtgEmbeddingDenseBackward(ptr, gradOutput.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) EmbeddingRenorm_(indices Tensor, maxNorm float64, normType float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmbeddingRenorm_(ptr, ts.ctensor, indices.ctensor, maxNorm, normType) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func EmbeddingSparseBackward(grad Tensor, indices Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cscaleGradByFreq := int32(0) + if scaleGradByFreq { cscaleGradByFreq = int32(1) } +lib.AtgEmbeddingSparseBackward(ptr, grad.ctensor, indices.ctensor, numWeights, paddingIdx, cscaleGradByFreq) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Empty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmpty(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) EmptyLike(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmptyOut(out Tensor, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyStrided(ptr, size, len(size), stride, len(stride), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Eq(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq(ptr, ts.ctensor, other.cscalar) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Eq1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Eq_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Eq1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEq1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) EqOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) EqOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEqOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Erf(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Erf_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErf_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ErfOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Erfc(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Erfc_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfc_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ErfcOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfcOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Erfinv(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinv(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + 
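For reference, a minimal sketch of how these generated bindings are meant to be called from user code. It is not part of the patch; import paths (`github.com/sugarme/gotch`, `github.com/sugarme/gotch/tensor`), `OfSlice`, and `Print` are assumed from the rest of the library, while `Cumsum` and `MustDrop` are defined in this hunk. Each binding returns `(Tensor, error)`, option arguments are passed as `gotch.DType`/`gotch.Device` values, and the trailing `del` flag asks the binding to drop the receiver's underlying C tensor after the call (the `defer ts.MustDrop()` above).

package main

import (
	"log"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Build a small input tensor; OfSlice is assumed to exist alongside the
	// generated API and returns (Tensor, error) like the bindings above.
	x, err := ts.OfSlice([]float64{1, 2, 3, 4})
	if err != nil {
		log.Fatal(err)
	}

	// Cumsum is one of the generated bindings: the final del=false keeps x
	// alive so it can be reused after the call.
	cs, err := x.Cumsum(0, gotch.Double, false)
	if err != nil {
		log.Fatal(err)
	}
	cs.Print()

	// Tensors wrap C memory, so they are released explicitly.
	cs.MustDrop()
	x.MustDrop()
}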
+func(ts Tensor) Erfinv_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinv_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ErfinvOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgErfinvOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Exp(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExp(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Exp_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExp_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ExpOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Expand(size []int64, implicit bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cimplicit := int32(0) + if implicit { cimplicit = int32(1) } +lib.AtgExpand(ptr, ts.ctensor, size, len(size), cimplicit) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ExpandAs(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpandAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Expm1(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpm1(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Expm1_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpm1_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Expm1Out(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExpm1Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Exponential_(lambd float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExponential_(ptr, ts.ctensor, lambd) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Eye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEye(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Eye1(n int64, m 
int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEye1(ptr, n, m, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func EyeOut(out Tensor, n int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEyeOut(ptr, out.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func EyeOut1(out Tensor, n int64, m int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEyeOut1(ptr, out.ctensor, n, m) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FakeQuantizePerChannelAffine(scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FakeQuantizePerChannelAffineBackward(grad Tensor, scale Tensor, zeroPoint Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerChannelAffineBackward(ptr, grad.ctensor, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerTensorAffine(ptr, ts.ctensor, scale, zeroPoint, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FakeQuantizePerTensorAffineBackward(grad Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFakeQuantizePerTensorAffineBackward(ptr, grad.ctensor, ts.ctensor, scale, zeroPoint, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearFp16Weight(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearFp16Weight(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearFp16WeightFp32Activation(input Tensor, packedWeight Tensor, bias Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearFp16WeightFp32Activation(ptr, input.ctensor, packedWeight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } 
+ retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearInt8Weight(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearInt8Weight(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmLinearInt8WeightFp32Activation(input Tensor, weight Tensor, packed Tensor, colOffsets Tensor, weightScale Scalar, weightZeroPoint Scalar, bias Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmLinearInt8WeightFp32Activation(ptr, input.ctensor, weight.ctensor, packed.ctensor, colOffsets.ctensor, weightScale.cscalar, weightZeroPoint.cscalar, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackGemmMatrixFp16(input Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackGemmMatrixFp16(ptr, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackQuantizedMatrix(input Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackQuantizedMatrix(ptr, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FbgemmPackQuantizedMatrix1(input Tensor, k int64, n int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFbgemmPackQuantizedMatrix1(ptr, input.ctensor, k, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FeatureAlphaDropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgFeatureAlphaDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FeatureAlphaDropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgFeatureAlphaDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func FeatureDropout(input Tensor, p float64, train bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgFeatureDropout(ptr, input.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FeatureDropout_(p float64, train bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctrain := int32(0) + if train { ctrain = int32(1) } +lib.AtgFeatureDropout_(ptr, ts.ctensor, p, ctrain) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Fft(signalNdim int64, normalized bool, del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +lib.AtgFft(ptr, ts.ctensor, signalNdim, cnormalized) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Fill_(value Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFill_(ptr, ts.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Fill1_(value Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFill1_(ptr, ts.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) FillDiagonal_(fillValue Scalar, wrap bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cwrap := int32(0) + if wrap { cwrap = int32(1) } +lib.AtgFillDiagonal_(ptr, ts.ctensor, fillValue.cscalar, cwrap) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Flatten(startDim int64, endDim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlatten(ptr, ts.ctensor, startDim, endDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Flip(dims []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlip(ptr, ts.ctensor, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Floor(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloor(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Floor_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloor_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) FloorDivide(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FloorDivide1(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FloorDivide_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) FloorDivide1_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivide1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) FloorDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FloorOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFloorOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Fmod(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Fmod1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Fmod_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Fmod1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmod1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) FmodOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FmodOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFmodOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Frac(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrac(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Frac_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrac_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) FracOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFracOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FractionalMaxPool2dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool2dBackward(ptr, 
gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FractionalMaxPool2dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FractionalMaxPool3dBackward(gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FractionalMaxPool3dBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, outputSize []int64, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFractionalMaxPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), outputSize, len(outputSize), indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FrobeniusNorm(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFrobeniusNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FrobeniusNorm1(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgFrobeniusNorm1(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FrobeniusNormOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgFrobeniusNormOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cshared := int32(0) + if shared { cshared = int32(1) } +lib.AtgFromFile(ptr, filename, cshared, size, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Full(size 
[]int64, fillValue Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFull(ptr, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) FullLike(fillValue Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFullLike(ptr, ts.ctensor, fillValue.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func FullOut(out Tensor, size []int64, fillValue Scalar)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFullOut(ptr, out.ctensor, size, len(size), fillValue.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Gather(dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { csparseGrad = int32(1) } +lib.AtgGather(ptr, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GatherOut(out Tensor, dim int64, index Tensor, sparseGrad bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { csparseGrad = int32(1) } +lib.AtgGatherOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ge(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ge1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ge_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Ge1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGe1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) GeOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GeOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Gelu(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GeluBackward(grad Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeluBackward(ptr, grad.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Geometric_(p float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGeometric_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Ger(vec2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGer(ptr, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GerOut(out Tensor, vec2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGerOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Glu(dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGlu(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GluBackward(gradOutput Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluBackward(ptr, gradOutput.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GluBackwardOut(gradInput Tensor, gradOutput Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GluOut(out Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGluOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Grad(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGrad(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func 
GridSampler(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgGridSampler(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler2d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgGridSampler2d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func GridSampler3d(input Tensor, grid Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgGridSampler3d(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func GroupNorm(input Tensor, numGroups int64, weight Tensor, bias Tensor, eps float64, cudnnEnabled bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } +lib.AtgGroupNorm(ptr, input.ctensor, numGroups, weight.ctensor, bias.ctensor, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func GruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Gt(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Gt1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Gt_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Gt1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGt1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) GtOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) GtOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGtOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHammingWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } +lib.AtgHammingWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } +lib.AtgHammingWindow2(ptr, windowLength, cperiodic, alpha, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func HammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } +lib.AtgHammingWindow3(ptr, windowLength, cperiodic, alpha, beta, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func HannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHannWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func HannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { cperiodic = int32(1) } +lib.AtgHannWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Hardshrink(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrink(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) HardshrinkBackward(gradOut Tensor, lambd Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardshrinkBackward(ptr, gradOut.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Hardsigmoid(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Hardsigmoid_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoid_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) HardsigmoidBackward(gradOutput Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) HardsigmoidOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardsigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Hardtanh(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Hardtanh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) HardtanhBackward(gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhBackward(ptr, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) HardtanhBackwardOut(gradInput Tensor, gradOutput Tensor, minVal Scalar, maxVal Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, minVal.cscalar, maxVal.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) HardtanhOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardtanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) HingeEmbeddingLoss(target Tensor, margin float64, reduction int64, del bool)(retVal 
Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHingeEmbeddingLoss(ptr, ts.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Histc(bins int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHistc(ptr, ts.ctensor, bins) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) HistcOut(out Tensor, bins int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHistcOut(ptr, out.ctensor, ts.ctensor, bins) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Hspmm(mat1 Tensor, mat2 Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHspmm(ptr, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func HspmmOut(out Tensor, mat1 Tensor, mat2 Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHspmmOut(ptr, out.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ifft(signalNdim int64, normalized bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +lib.AtgIfft(ptr, ts.ctensor, signalNdim, cnormalized) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Im2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2col(ptr, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Im2colBackward(gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2colBackward(ptr, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Im2colBackwardOut(gradInput Tensor, gradOutput Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2colBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, inputSize, len(inputSize), kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} 
+ +func(ts Tensor) Im2colOut(out Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIm2colOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), dilation, len(dilation), padding, len(padding), stride, len(stride)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Imag(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgImag(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Index(indices []Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices {cindices = append(cindices, t.ctensor)} +lib.AtgIndex(ptr, ts.ctensor, cindices, len(cindices)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IndexAdd(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexAdd(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IndexAdd_(dim int64, index Tensor, source Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexAdd_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) IndexCopy(dim int64, index Tensor, source Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexCopy(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IndexCopy_(dim int64, index Tensor, source Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexCopy_(ptr, ts.ctensor, dim, index.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) IndexFill(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IndexFill1(dim int64, index Tensor, value Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill1(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IndexFill_(dim int64, index Tensor, value Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill_(ptr, ts.ctensor, dim, index.ctensor, 
value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) IndexFill1_(dim int64, index Tensor, value Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexFill1_(ptr, ts.ctensor, dim, index.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) IndexPut(indices []Tensor, values Tensor, accumulate bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices {cindices = append(cindices, t.ctensor)} +caccumulate := int32(0) + if accumulate { caccumulate = int32(1) } +lib.AtgIndexPut(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IndexPut_(indices []Tensor, values Tensor, accumulate bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cindices []lib.Ctensor + for _, t := range indices {cindices = append(cindices, t.ctensor)} +caccumulate := int32(0) + if accumulate { caccumulate = int32(1) } +lib.AtgIndexPut_(ptr, ts.ctensor, cindices, len(cindices), values.ctensor, caccumulate) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) IndexSelect(dim int64, index Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelect(ptr, ts.ctensor, dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IndexSelectOut(out Tensor, dim int64, index Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelectOut(ptr, out.ctensor, ts.ctensor, dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Indices(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndices(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func InstanceNorm(input Tensor, weight Tensor, bias Tensor, runningMean Tensor, runningVar Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cuseInputStats := int32(0) + if useInputStats { cuseInputStats = int32(1) } +ccudnnEnabled := int32(0) + if cudnnEnabled { ccudnnEnabled = int32(1) } +lib.AtgInstanceNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, runningMean.ctensor, runningVar.ctensor, cuseInputStats, momentum, eps, ccudnnEnabled) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) IntRepr(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIntRepr(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Inverse(del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgInverse(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) InverseOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgInverseOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Irfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +conesided := int32(0) + if onesided { conesided = int32(1) } +lib.AtgIrfft(ptr, ts.ctensor, signalNdim, cnormalized, conesided, signalSizes, len(signalSizes)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Isclose(other Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cequalNan := int32(0) + if equalNan { cequalNan = int32(1) } +lib.AtgIsclose(ptr, ts.ctensor, other.ctensor, rtol, atol, cequalNan) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Isfinite(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsfinite(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Isinf(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsinf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Isnan(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsnan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) KlDiv(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) KlDivBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) L1Loss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1Loss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err 
!= nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) L1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) L1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) L1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func LayerNorm(input Tensor, normalizedShape []int64, weight Tensor, bias Tensor, eps float64, cudnnEnable bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccudnnEnable := int32(0) + if cudnnEnable { ccudnnEnable = int32(1) } +lib.AtgLayerNorm(ptr, input.ctensor, normalizedShape, len(normalizedShape), weight.ctensor, bias.ctensor, eps, ccudnnEnable) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Le(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Le1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Le_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Le1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLe1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LeOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LeOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeOut1(ptr, out.ctensor, 
ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LeakyRelu(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyRelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LeakyRelu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyRelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LeakyReluBackward(gradOutput Tensor, negativeSlope Scalar, selfIsResult bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cselfIsResult := int32(0) + if selfIsResult { cselfIsResult = int32(1) } +lib.AtgLeakyReluBackward(ptr, gradOutput.ctensor, ts.ctensor, negativeSlope.cscalar, cselfIsResult) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LeakyReluOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLeakyReluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lerp(end Tensor, weight Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp(ptr, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lerp1(end Tensor, weight Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp1(ptr, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lerp_(end Tensor, weight Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp_(ptr, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Lerp1_(end Tensor, weight Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerp1_(ptr, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LerpOut(out Tensor, end Tensor, weight Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpOut(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LerpOut1(out Tensor, end Tensor, weight Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLerpOut1(ptr, out.ctensor, ts.ctensor, end.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lgamma(del 
bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgamma(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lgamma_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgamma_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LgammaOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLgammaOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Linear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Linspace(start Scalar, end Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func LinspaceOut(out Tensor, start Scalar, end Scalar, steps int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log10(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog10(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log10_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog10_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Log10Out(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog10Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log1p(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog1p(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log1p_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog1p_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) 
Log1pOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog1pOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log2(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog2(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log2_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog2_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Log2Out(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog2Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Log_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLog_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LogNormal_(mean float64, std float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogNormal_(ptr, ts.ctensor, mean, std) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LogOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogSigmoid(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogSigmoidBackward(gradOutput Tensor, buffer Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoidBackward(ptr, gradOutput.ctensor, ts.ctensor, buffer.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogSigmoidBackwardOut(gradInput Tensor, gradOutput Tensor, buffer Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoidBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, buffer.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogSigmoidOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Logdet(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogdet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalAnd(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalAnd(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalAnd_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalAnd_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LogicalAndOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalAndOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalNot(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalNot(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalNot_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalNot_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LogicalNotOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalNotOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalOr(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalOr(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalOr_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalOr_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LogicalOrOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalOrOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalXor(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalXor(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogicalXor_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalXor_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LogicalXorOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogicalXorOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Logspace(start Scalar, end Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func LogspaceOut(out Tensor, start Scalar, end Scalar, steps int64, base float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Logsumexp(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgLogsumexp(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LogsumexpOut(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgLogsumexpOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lt(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lt1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Lt_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Lt1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLt1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) LtOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { 
defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLtOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LtOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLtOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LuSolve(lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLuSolve(ptr, ts.ctensor, lUData.ctensor, lUPivots.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) LuSolveOut(out Tensor, lUData Tensor, lUPivots Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLuSolveOut(ptr, out.ctensor, ts.ctensor, lUData.ctensor, lUPivots.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func MarginRankingLoss(input1 Tensor, input2 Tensor, target Tensor, margin float64, reduction int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMarginRankingLoss(ptr, input1.ctensor, input2.ctensor, target.ctensor, margin, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaskedFill(mask Tensor, value Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill(ptr, ts.ctensor, mask.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaskedFill1(mask Tensor, value Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill1(ptr, ts.ctensor, mask.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaskedFill_(mask Tensor, value Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill_(ptr, ts.ctensor, mask.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) MaskedFill1_(mask Tensor, value Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedFill1_(ptr, ts.ctensor, mask.ctensor, value.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) MaskedScatter(mask Tensor, source Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedScatter(ptr, ts.ctensor, mask.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaskedScatter_(mask Tensor, source Tensor)(err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedScatter_(ptr, ts.ctensor, mask.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) MaskedSelect(mask Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedSelect(ptr, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaskedSelectOut(out Tensor, mask Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedSelectOut(ptr, out.ctensor, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Matmul(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatmul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MatmulOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatmulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MatrixPower(n int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatrixPower(ptr, ts.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MatrixRank(symmetric bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csymmetric := int32(0) + if symmetric { csymmetric = int32(1) } +lib.AtgMatrixRank(ptr, ts.ctensor, csymmetric) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MatrixRank1(tol float64, symmetric bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csymmetric := int32(0) + if symmetric { csymmetric = int32(1) } +lib.AtgMatrixRank1(ptr, ts.ctensor, tol, csymmetric) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Max(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMax(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Max1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMax1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxPool2dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMaxPool2dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxPool2dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMaxPool2dWithIndicesBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxPool3dWithIndicesBackward(gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMaxPool3dWithIndicesBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxPool3dWithIndicesBackwardOut(gradInput Tensor, gradOutput Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMaxPool3dWithIndicesBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode, indices.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool2d(indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool2dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool2dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool2dOut(out Tensor, indices Tensor, outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool2dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool3d(indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3d(ptr, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool3dBackward(gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del 
bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool3dBackwardOut(gradInput Tensor, gradOutput Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxUnpool3dOut(out Tensor, indices Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaxUnpool3dOut(ptr, out.ctensor, ts.ctensor, indices.ctensor, outputSize, len(outputSize), stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MaxValues(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgMaxValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mean(dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMean(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mean1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgMean1(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MeanOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgMeanOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Median(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMedian(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Min(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Min1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMin1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MinOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMinOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MinValues(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgMinValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MiopenConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenConvolutionBackwardBias(gradOutput Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMiopenConvolutionBackwardBias(ptr, gradOutput.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MiopenConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic 
= int32(1) } +lib.AtgMiopenConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MiopenConvolutionTranspose(weight Tensor, bias Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenConvolutionTranspose(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenConvolutionTransposeBackwardInput(gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MiopenDepthwiseConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenDepthwiseConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func MiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, 
stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenDepthwiseConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { cbenchmark = int32(1) } +cdeterministic := int32(0) + if deterministic { cdeterministic = int32(1) } +lib.AtgMiopenDepthwiseConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MkldnnAdaptiveAvgPool2d(outputSize []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnAdaptiveAvgPool2d(ptr, ts.ctensor, outputSize, len(outputSize)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MkldnnConvolution(weight Tensor, bias Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnConvolution(ptr, ts.ctensor, weight.ctensor, bias.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnConvolutionBackwardInput(selfSize []int64, gradOutput Tensor, weight Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbiasDefined := int32(0) + if biasDefined { cbiasDefined = int32(1) } +lib.AtgMkldnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbiasDefined) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func MkldnnLinear(input Tensor, weight Tensor, bias Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnLinear(ptr, input.ctensor, weight.ctensor, bias.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgMkldnnMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnReorderConv2dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mm(mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MmOut(out Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MseLoss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MseLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MseLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MseLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMseLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mul(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mul1(other Scalar, del 
bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mul_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Mul1_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMul1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) MulOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMulOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MultiMarginLossBackward(gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MultiMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, p Scalar, margin Scalar, weight Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, p.cscalar, margin.cscalar, weight.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MultilabelMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MultilabelMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MultilabelMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, isTarget Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, isTarget.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func(ts Tensor) MultilabelMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultilabelMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Multinomial(numSamples int64, replacement bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + creplacement := int32(0) + if replacement { creplacement = int32(1) } +lib.AtgMultinomial(ptr, ts.ctensor, numSamples, creplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MultinomialOut(out Tensor, numSamples int64, replacement bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + creplacement := int32(0) + if replacement { creplacement = int32(1) } +lib.AtgMultinomialOut(ptr, out.ctensor, ts.ctensor, numSamples, creplacement) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mv(vec Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMv(ptr, ts.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) MvOut(out Tensor, vec Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvOut(ptr, out.ctensor, ts.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mvlgamma(p int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvlgamma(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Mvlgamma_(p int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMvlgamma_(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Narrow(dim int64, start int64, length int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrow(ptr, ts.ctensor, dim, start, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Narrow1(dim int64, start Tensor, length int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrow1(ptr, ts.ctensor, dim, start.ctensor, length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NarrowCopy(dim int64, start int64, length int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNarrowCopy(ptr, ts.ctensor, dim, start, 
length) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NativeNorm(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNativeNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ne(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ne1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ne_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Ne1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNe1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) NeOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NeOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Neg(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeg(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Neg_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNeg_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) NegOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewEmpty(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NewFull(size []int64, 
fillValue Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewFull(ptr, ts.ctensor, size, len(size), fillValue.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNewZeros(ptr, ts.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLoss(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLoss2d(target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2d(ptr, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLoss2dBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLoss2dBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLoss2dOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLoss2dOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLossBackward(gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgNllLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, totalWeight Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex, totalWeight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NllLossOut(out Tensor, target Tensor, weight Tensor, reduction int64, ignoreIndex int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNllLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, weight.ctensor, reduction, ignoreIndex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Nonzero(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNonzero(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NonzeroOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNonzeroOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Norm(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNorm(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Norm1(p Scalar, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNorm1(ptr, ts.ctensor, p.cscalar, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Norm2(p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNorm2(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Norm3(p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNorm3(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormExceptDim(v Tensor, pow int64, dim int64)(retVal Tensor, 
err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormExceptDim(ptr, v.ctensor, pow, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NormOut(out Tensor, p Scalar, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NormOut1(out Tensor, p Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNormOut1(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Normal_(mean float64, std float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormal_(ptr, ts.ctensor, mean, std) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func NormalOut(out Tensor, mean Tensor, std float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut(ptr, out.ctensor, mean.ctensor, std) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormalOut1(out Tensor, mean float64, std Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut1(ptr, out.ctensor, mean, std.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormalOut2(out Tensor, mean Tensor, std Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut2(ptr, out.ctensor, mean.ctensor, std.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func NormalOut3(out Tensor, mean float64, std float64, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNormalOut3(ptr, out.ctensor, mean, std, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NuclearNorm(keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNuclearNorm(ptr, ts.ctensor, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NuclearNorm1(dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNuclearNorm1(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err 
+} + +func(ts Tensor) NuclearNormOut(out Tensor, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNuclearNormOut(ptr, out.ctensor, ts.ctensor, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NuclearNormOut1(out Tensor, dim []int64, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgNuclearNormOut1(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) NumpyT(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNumpyT(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) OneHot(numClasses int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOneHot(ptr, ts.ctensor, numClasses) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Ones(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOnes(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) OnesLike(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOnesLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func OnesOut(out Tensor, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOnesOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Orgqr(input2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOrgqr(ptr, ts.ctensor, input2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) OrgqrOut(out Tensor, input2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOrgqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Ormqr(input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cleft := int32(0) + if left { cleft = int32(1) } +ctranspose := int32(0) + if transpose { ctranspose = int32(1) } +lib.AtgOrmqr(ptr, ts.ctensor, 
input2.ctensor, input3.ctensor, cleft, ctranspose) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) OrmqrOut(out Tensor, input2 Tensor, input3 Tensor, left bool, transpose bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cleft := int32(0) + if left { cleft = int32(1) } +ctranspose := int32(0) + if transpose { ctranspose = int32(1) } +lib.AtgOrmqrOut(ptr, out.ctensor, ts.ctensor, input2.ctensor, input3.ctensor, cleft, ctranspose) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func PairwiseDistance(x1 Tensor, x2 Tensor, p float64, eps float64, keepdim bool)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgPairwiseDistance(ptr, x1.ctensor, x2.ctensor, p, eps, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Pdist(p float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPdist(ptr, ts.ctensor, p) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Permute(dims []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPermute(ptr, ts.ctensor, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) PinMemory(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPinMemory(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Pinverse(rcond float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPinverse(ptr, ts.ctensor, rcond) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) PixelShuffle(upscaleFactor int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPixelShuffle(ptr, ts.ctensor, upscaleFactor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Poisson(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPoisson(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func PoissonNllLoss(input Tensor, target Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + clogInput := int32(0) + if logInput { clogInput = int32(1) } +cfull := int32(0) + if full { cfull = int32(1) } +lib.AtgPoissonNllLoss(ptr, input.ctensor, target.ctensor, clogInput, cfull, eps, reduction) + if err = TorchErr(); err != nil { + return retVal, err + 
} + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Polygamma(n int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolygamma(ptr, n, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Polygamma_(n int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolygamma_(ptr, ts.ctensor, n) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) PolygammaOut(out Tensor, n int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolygammaOut(ptr, out.ctensor, n, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Pow(exponent Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow(ptr, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Pow1(exponent Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow1(ptr, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Pow2(selfScalar Scalar, exponent Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow2(ptr, selfScalar.cscalar, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Pow_(exponent Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow_(ptr, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Pow1_(exponent Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPow1_(ptr, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) PowOut(out Tensor, exponent Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPowOut(ptr, out.ctensor, ts.ctensor, exponent.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) PowOut1(out Tensor, exponent Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPowOut1(ptr, out.ctensor, ts.ctensor, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func PowOut2(out Tensor, selfScalar Scalar, exponent Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPowOut2(ptr, out.ctensor, selfScalar.cscalar, exponent.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Prelu(weight Tensor, del bool)(retVal Tensor, err error) { +if del { defer 
ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPrelu(ptr, ts.ctensor, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Prod(dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgProd(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Prod1(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgProd1(ptr, ts.ctensor, dim, ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ProdOut(out Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgProdOut(ptr, out.ctensor, ts.ctensor, dim, ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Put_(index Tensor, source Tensor, accumulate bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + caccumulate := int32(0) + if accumulate { caccumulate = int32(1) } +lib.AtgPut_(ptr, ts.ctensor, index.ctensor, source.ctensor, caccumulate) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) QPerChannelScales(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQPerChannelScales(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) QPerChannelZeroPoints(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQPerChannelZeroPoints(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) QuantizePerChannel(scales Tensor, zeroPoints Tensor, axis int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizePerChannel(ptr, ts.ctensor, scales.ctensor, zeroPoints.ctensor, axis, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) QuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizePerTensor(ptr, ts.ctensor, scale, zeroPoint, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedBatchNorm(input Tensor, weight Tensor, bias Tensor, mean Tensor, vari Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedBatchNorm(ptr, input.ctensor, weight.ctensor, bias.ctensor, mean.ctensor, vari.ctensor, eps, outputScale, outputZeroPoint) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedGruCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedGruCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { cceilMode = int32(1) } +lib.AtgQuantizedMaxPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedRnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func QuantizedRnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor, packedIh Tensor, packedHh Tensor, colOffsetsIh Tensor, colOffsetsHh Tensor, scaleIh Scalar, scaleHh Scalar, zeroPointIh Scalar, zeroPointHh Scalar)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgQuantizedRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor, packedIh.ctensor, packedHh.ctensor, colOffsetsIh.ctensor, colOffsetsHh.ctensor, scaleIh.cscalar, scaleHh.cscalar, zeroPointIh.cscalar, zeroPointHh.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRand(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RandLike(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandOut(out Tensor, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Randint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandint(ptr, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Randint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandint1(ptr, low, high, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RandintLike(high int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintLike(ptr, ts.ctensor, high) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RandintLike1(low int64, high int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintLike1(ptr, ts.ctensor, low, high) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandintOut(out Tensor, high int64, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintOut(ptr, out.ctensor, high, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandintOut1(out Tensor, low int64, high int64, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandintOut1(ptr, out.ctensor, low, high, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Randn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandn(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RandnLike(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandnLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandnOut(out Tensor, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandnOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Random_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandom_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Random1_(to int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandom1_(ptr, ts.ctensor, to) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Random2(from int64, to int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandom2(ptr, ts.ctensor, from, to) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Randperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandperm(ptr, n, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RandpermOut(out Tensor, n int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRandpermOut(ptr, out.ctensor, n) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Range(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRange(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Range1(start Scalar, end Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRange1(ptr, start.cscalar, end.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RangeOut(out Tensor, start Scalar, end Scalar)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRangeOut(ptr, out.ctensor, start.cscalar, end.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Real(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReal(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Reciprocal(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReciprocal(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Reciprocal_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReciprocal_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ReciprocalOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReciprocalOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad1d(padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad2d(padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReflectionPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReflectionPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Relu(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + 
retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Relu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Remainder(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Remainder1(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Remainder_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Remainder1_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainder1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) RemainderOut(out Tensor, other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainderOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RemainderOut1(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRemainderOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Renorm(p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRenorm(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Renorm_(p Scalar, dim int64, maxnorm Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRenorm_(ptr, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) RenormOut(out Tensor, p Scalar, dim int64, maxnorm Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRenormOut(ptr, out.ctensor, ts.ctensor, p.cscalar, dim, maxnorm.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Repeat(repeats []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeat(ptr, ts.ctensor, repeats, len(repeats)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func 
RepeatInterleave(repeats Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeatInterleave(ptr, repeats.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RepeatInterleave1(repeats Tensor, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeatInterleave1(ptr, ts.ctensor, repeats.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RepeatInterleave2(repeats int64, dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRepeatInterleave2(ptr, ts.ctensor, repeats, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad1d(padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad1dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad1dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad1dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad1dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad2d(padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad2dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad2dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del 
bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad2dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad2dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad3d(padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3d(ptr, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad3dBackward(gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3dBackward(ptr, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad3dBackwardOut(gradInput Tensor, gradOutput Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReplicationPad3dOut(out Tensor, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReplicationPad3dOut(ptr, out.ctensor, ts.ctensor, padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RequiresGrad_(requiresGrad bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + crequiresGrad := int32(0) + if requiresGrad { crequiresGrad = int32(1) } +lib.AtgRequiresGrad_(ptr, ts.ctensor, crequiresGrad) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Reshape(shape []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReshape(ptr, ts.ctensor, shape, len(shape)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ReshapeAs(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgReshapeAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Resize_(size []int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResize_(ptr, ts.ctensor, size, len(size)) + 
if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ResizeAs_(theTemplate Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgResizeAs_(ptr, ts.ctensor, theTemplate.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Rfft(signalNdim int64, normalized bool, onesided bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +conesided := int32(0) + if onesided { conesided = int32(1) } +lib.AtgRfft(ptr, ts.ctensor, signalNdim, cnormalized, conesided) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RnnReluCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRnnReluCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func RnnTanhCell(input Tensor, hx Tensor, wIh Tensor, wHh Tensor, bIh Tensor, bHh Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRnnTanhCell(ptr, input.ctensor, hx.ctensor, wIh.ctensor, wHh.ctensor, bIh.ctensor, bHh.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Roll(shifts []int64, dims []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoll(ptr, ts.ctensor, shifts, len(shifts), dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Rot90(k int64, dims []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRot90(ptr, ts.ctensor, k, dims, len(dims)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Round(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRound(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Round_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRound_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) RoundOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRoundOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Rrelu(training bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +lib.AtgRrelu(ptr, ts.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func(ts Tensor) Rrelu_(training bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +lib.AtgRrelu_(ptr, ts.ctensor, ctraining) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) RreluWithNoise(noise Tensor, training bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +lib.AtgRreluWithNoise(ptr, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RreluWithNoise_(noise Tensor, training bool)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +lib.AtgRreluWithNoise_(ptr, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) RreluWithNoiseBackward(gradOutput Tensor, noise Tensor, lower Scalar, upper Scalar, training bool, selfIsResult bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +cselfIsResult := int32(0) + if selfIsResult { cselfIsResult = int32(1) } +lib.AtgRreluWithNoiseBackward(ptr, gradOutput.ctensor, ts.ctensor, noise.ctensor, lower.cscalar, upper.cscalar, ctraining, cselfIsResult) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) RreluWithNoiseOut(out Tensor, noise Tensor, training bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctraining := int32(0) + if training { ctraining = int32(1) } +lib.AtgRreluWithNoiseOut(ptr, out.ctensor, ts.ctensor, noise.ctensor, ctraining) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Rsqrt(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrt(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Rsqrt_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrt_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) RsqrtOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsqrtOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Rsub(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsub(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Rsub1(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRsub1(ptr, 
ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ScalarTensor(s Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScalarTensor(ptr, s.cscalar, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Scatter(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Scatter1(dim int64, index Tensor, value Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter1(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Scatter_(dim int64, index Tensor, src Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Scatter1_(dim int64, index Tensor, value Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter1_(ptr, ts.ctensor, dim, index.ctensor, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ScatterAdd(dim int64, index Tensor, src Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterAdd(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ScatterAdd_(dim int64, index Tensor, src Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatterAdd_(ptr, ts.ctensor, dim, index.ctensor, src.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Select(dim int64, index int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelect(ptr, ts.ctensor, dim, index) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Selu(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Selu_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Set_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSet_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Set1_(source 
Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSet1_(ptr, ts.ctensor, source.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) SetRequiresGrad(r bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cr := int32(0) + if r { cr = int32(1) } +lib.AtgSetRequiresGrad(ptr, ts.ctensor, cr) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sigmoid(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoid(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sigmoid_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoid_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func SigmoidBackward(gradOutput Tensor, output Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidBackward(ptr, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func SigmoidBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SigmoidOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSigmoidOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sign(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSign(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sign_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSign_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) SignOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSignOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sin(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sin_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) SinOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sinh(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sinh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) SinhOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSinhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Slice(dim int64, start int64, end int64, step int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlice(ptr, ts.ctensor, dim, start, end, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SlowConv3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConv3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SlowConv3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConv3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SlowConvDilated2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvDilated2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SlowConvDilated3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvDilated3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts 
Tensor) SlowConvTranspose2d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose2d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SlowConvTranspose2dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose2dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SlowConvTranspose3d(weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose3d(ptr, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SlowConvTranspose3dOut(out Tensor, weight Tensor, kernelSize []int64, bias Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSlowConvTranspose3dOut(ptr, out.ctensor, ts.ctensor, weight.ctensor, kernelSize, len(kernelSize), bias.ctensor, stride, len(stride), padding, len(padding), outputPadding, len(outputPadding), dilation, len(dilation)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Smm(mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmm(ptr, ts.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SmoothL1Loss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SmoothL1LossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil 
{ + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SmoothL1LossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SmoothL1LossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftMarginLoss(target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLoss(ptr, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftMarginLossBackward(gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftMarginLossBackwardOut(gradInput Tensor, gradOutput Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftMarginLossOut(out Tensor, target Tensor, reduction int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftMarginLossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Softmax(dim int64, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Softplus(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplus(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftplusBackward(gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + 
+ lib.AtgSoftplusBackward(ptr, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftplusBackwardOut(gradInput Tensor, gradOutput Tensor, beta Scalar, threshold Scalar, output Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, beta.cscalar, threshold.cscalar, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftplusOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftplusOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Softshrink(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrink(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftshrinkBackward(gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkBackward(ptr, gradOutput.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftshrinkBackwardOut(gradInput Tensor, gradOutput Tensor, lambd Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, lambd.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SoftshrinkOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSoftshrinkOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensor(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensor1(indices Tensor, values Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensor1(ptr, indices.ctensor, values.ctensor, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func SparseCooTensor2(indices Tensor, values Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseCooTensor2(ptr, indices.ctensor, values.ctensor, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SparseMask(mask Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseMask(ptr, ts.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SparseResize_(size []int64, sparseDim int64, denseDim int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseResize_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) SparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSparseResizeAndClear_(ptr, ts.ctensor, size, len(size), sparseDim, denseDim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Sqrt(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrt(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sqrt_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrt_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) SqrtOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqrtOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Square(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSquare(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Square_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSquare_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Squeeze(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Squeeze1(dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze1(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Squeeze_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Squeeze1_(dim int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSqueeze1_(ptr, ts.ctensor, 
dim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Sspaddmm(mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSspaddmm(ptr, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SspaddmmOut(out Tensor, mat1 Tensor, mat2 Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSspaddmmOut(ptr, out.ctensor, ts.ctensor, mat1.ctensor, mat2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Stack(tensors []Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.AtgStack(ptr, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func StackOut(out Tensor, tensors []Tensor, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors {ctensors = append(ctensors, t.ctensor)} +lib.AtgStackOut(ptr, out.ctensor, ctensors, len(ctensors), dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Std(unbiased bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +lib.AtgStd(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Std1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgStd1(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) StdOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgStdOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Stft(nFft int64, hopLength int64, winLength int64, window Tensor, normalized bool, onesided bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnormalized := int32(0) + if normalized { cnormalized = int32(1) } +conesided := int32(0) + if onesided { conesided = int32(1) } +lib.AtgStft(ptr, ts.ctensor, nFft, hopLength, winLength, window.ctensor, cnormalized, conesided) + if err = TorchErr(); 
err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sub(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sub1(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sub_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Sub1_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSub1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) SubOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sum(dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSum(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Sum1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgSum1(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SumOut(out Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgSumOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) SumToSize(size []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSumToSize(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) T(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgT(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) T_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgT_(ptr, 
ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Take(index Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTake(ptr, ts.ctensor, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TakeOut(out Tensor, index Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTakeOut(ptr, out.ctensor, ts.ctensor, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Tan(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Tan_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) TanOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Tanh(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Tanh_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func TanhBackward(gradOutput Tensor, output Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhBackward(ptr, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func TanhBackwardOut(gradInput Tensor, gradOutput Tensor, output Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, output.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TanhOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Tensordot(other Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTensordot(ptr, ts.ctensor, other.ctensor, dimsSelf, len(dimsSelf), dimsOther, len(dimsOther)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts 
Tensor) Threshold(threshold Scalar, value Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThreshold(ptr, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Threshold_(threshold Scalar, value Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThreshold_(ptr, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) ThresholdBackward(gradOutput Tensor, threshold Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThresholdBackward(ptr, gradOutput.ctensor, ts.ctensor, threshold.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ThresholdOut(out Tensor, threshold Scalar, value Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgThresholdOut(ptr, out.ctensor, ts.ctensor, threshold.cscalar, value.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) To(device gotch.Device, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTo(ptr, ts.ctensor, device.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) To1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } +lib.AtgTo1(ptr, ts.ctensor, optionsKind.CInt(), optionsDevice.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) To2(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } +lib.AtgTo2(ptr, ts.ctensor, dtype.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) To3(other Tensor, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } +lib.AtgTo3(ptr, ts.ctensor, other.ctensor, cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) To4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cnonBlocking := int32(0) + if nonBlocking { cnonBlocking = int32(1) } +ccopy := int32(0) + if copy { ccopy = int32(1) } +lib.AtgTo4(ptr, ts.ctensor, device.CInt(), dtype.CInt(), cnonBlocking, ccopy) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ToDense(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToDense(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ToDenseBackward(grad Tensor, input Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToDenseBackward(ptr, grad.ctensor, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ToMkldnn(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToMkldnn(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ToMkldnnBackward(grad Tensor, input Tensor)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToMkldnnBackward(ptr, grad.ctensor, input.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ToSparse(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToSparse(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ToSparse1(sparseDim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgToSparse1(ptr, ts.ctensor, sparseDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Totype(scalarType gotch.DType, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTotype(ptr, ts.ctensor, scalarType.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Trace(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrace(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Transpose(dim0 int64, dim1 int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTranspose(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Transpose_(dim0 int64, dim1 int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTranspose_(ptr, ts.ctensor, dim0, dim1) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Trapz(y Tensor, x Tensor, dim int64)(retVal Tensor, 
err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapz(ptr, y.ctensor, x.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func Trapz1(y Tensor, dx float64, dim int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrapz1(ptr, y.ctensor, dx, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Tril(diagonal int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTril(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Tril_(diagonal int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTril_(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func TrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrilIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TrilOut(out Tensor, diagonal int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrilOut(ptr, out.ctensor, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func TripletMarginLoss(anchor Tensor, positive Tensor, negative Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cswap := int32(0) + if swap { cswap = int32(1) } +lib.AtgTripletMarginLoss(ptr, anchor.ctensor, positive.ctensor, negative.ctensor, margin, p, eps, cswap, reduction) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Triu(diagonal int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriu(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Triu_(diagonal int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriu_(ptr, ts.ctensor, diagonal) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func TriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriuIndices(ptr, row, col, offset, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TriuOut(out Tensor, diagonal int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTriuOut(ptr, out.ctensor, ts.ctensor, diagonal) + if err = TorchErr(); err 
!= nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TrueDivide(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivide(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TrueDivide1(other Scalar, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivide1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TrueDivide_(other Tensor)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivide_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) TrueDivide1_(other Scalar)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivide1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) TrueDivideOut(out Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrueDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Trunc(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrunc(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Trunc_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTrunc_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) TruncOut(out Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTruncOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) TypeAs(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTypeAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Unfold(dimension int64, size int64, step int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnfold(ptr, ts.ctensor, dimension, size, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Uniform_(from float64, to float64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUniform_(ptr, ts.ctensor, from, to) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) Unsqueeze(dim int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnsqueeze(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Unsqueeze_(dim int64)(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnsqueeze_(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func(ts Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBicubic2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBicubic2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleBicubic2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleBicubic2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBilinear2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) 
} +lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleBilinear2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleBilinear2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleBilinear2dOut(out Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleLinear1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleLinear1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleLinear1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleLinear1dOut(out Tensor, outputSize []int64, alignCorners bool, scales float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) + if err = TorchErr(); err != nil { + return retVal, 
err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleNearest1d(outputSize []int64, scales float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, len(outputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest1dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest1dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scales float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleNearest1dOut(out Tensor, outputSize []int64, scales float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scales) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest2dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest2dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleNearest2dOut(out Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleNearest3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleNearest3dOut(out Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleTrilinear3dBackward(gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func UpsampleTrilinear3dBackwardOut(gradInput Tensor, gradOutput Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { 
calignCorners = int32(1) } +lib.AtgUpsampleTrilinear3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) UpsampleTrilinear3dOut(out Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { calignCorners = int32(1) } +lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, scalesH, scalesW) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Values(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgValues(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Var(unbiased bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +lib.AtgVar(ptr, ts.ctensor, cunbiased) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Var1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgVar1(ptr, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) VarOut(out Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cunbiased := int32(0) + if unbiased { cunbiased = int32(1) } +ckeepdim := int32(0) + if keepdim { ckeepdim = int32(1) } +lib.AtgVarOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), cunbiased, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) View(size []int64, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgView(ptr, ts.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ViewAs(other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgViewAs(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Where1(condition Tensor, other Tensor, del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgWhere1(ptr, condition.ctensor, ts.ctensor, other.ctensor) 
+ if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) Zero_()(err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZero_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func Zeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZeros(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func(ts Tensor) ZerosLike(del bool)(retVal Tensor, err error) { +if del { defer ts.MustDrop() } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZerosLike(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} + +func ZerosOut(out Tensor, size []int64)(retVal Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgZerosOut(ptr, out.ctensor, size, len(size)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = Tensor{ctensor: *ptr} + + return retVal, err +} +// End of implementing Tensor ================================= diff --git a/tensor/tensor.go b/tensor/tensor.go index 74a9069..fd6de1e 100644 --- a/tensor/tensor.go +++ b/tensor/tensor.go @@ -55,6 +55,7 @@ func (ts Tensor) Size() (retVal []int64, err error) { } retVal = decodeSize(szPtr, dim) + return retVal, nil } @@ -63,6 +64,7 @@ func (ts Tensor) MustSize() (retVal []int64) { if err != nil { log.Fatal(err) } + return retVal } @@ -295,33 +297,34 @@ func (ts Tensor) Device() (retVal gotch.Device, err error) { return device.OfCInt(int32(cInt)), nil } -func (ts Tensor) Eq1(other Tensor, del bool) (retVal Tensor, err error) { - - // Get a C null pointer - // https://stackoverflow.com/a/2022369 - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - if del { - defer ts.MustDrop() - } - - lib.AtgEq1(ptr, ts.ctensor, other.ctensor) - if err = TorchErr(); err != nil { - return retVal, err - } - - return Tensor{ctensor: *ptr}, nil - -} - -func (ts Tensor) MustEq1(other Tensor, del bool) (retVal Tensor) { - retVal, err := ts.Eq1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - +/* + * func (ts Tensor) Eq1(other Tensor, del bool) (retVal Tensor, err error) { + * + * // Get a C null pointer + * // https://stackoverflow.com/a/2022369 + * ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + * if del { + * defer ts.MustDrop() + * } + * + * lib.AtgEq1(ptr, ts.ctensor, other.ctensor) + * if err = TorchErr(); err != nil { + * return retVal, err + * } + * + * return Tensor{ctensor: *ptr}, nil + * + * } + * + * func (ts Tensor) MustEq1(other Tensor, del bool) (retVal Tensor) { + * retVal, err := ts.Eq1(other, del) + * if err != nil { + * log.Fatal(err) + * } + * + * return retVal + * } + * */ // Float64Value returns a float value on tensors holding a single element. // An error is returned otherwise. // double at_double_value_at_indexes(tensor, int64_t *indexes, int indexes_len); @@ -440,7 +443,7 @@ func (ts Tensor) IsSparse() (retVal bool, err error) { // ZeroGrad zeroes the gradient tensor attached to this tensor if defined. 
func (ts Tensor) ZeroGrad() { - grad := ts.MustGrad() + grad := ts.MustGrad(false) if grad.MustDefined() { grad.Detach_() grad.Zero_() @@ -1022,8 +1025,8 @@ func (r Reduction) ToInt() (retVal int) { return } -// Values returns values of tensor in a slice of float64. -func (ts Tensor) Values() []float64 { +// Float64Values returns values of tensor in a slice of float64. +func (ts Tensor) Float64Values() []float64 { numel := ts.Numel() vec := make([]float64, numel) @@ -1102,5 +1105,5 @@ func (ts Tensor) Swish() (retVal Tensor) { } func (ts Tensor) AvgPool2DDefault(ksize int64, del bool) (retVal Tensor) { - return ts.MustAvgPool2D([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, false, true, 1, del) + return ts.MustAvgPool2d([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, false, true, 1, del) } diff --git a/tensor/tensor.go1 b/tensor/tensor.go1 new file mode 100644 index 0000000..9c5949d --- /dev/null +++ b/tensor/tensor.go1 @@ -0,0 +1,35 @@ +package tensor + +import ( + "log" + + lib "github.com/sugarme/gotch/libtch" +) + +type Tensor struct { + ctensor lib.Ctensor +} + +func (ts Tensor) Print() { + lib.AtPrint(ts.ctensor) + if err := TorchErr(); err != nil { + log.Fatal(err) + } +} + +// Drop drops (frees) the tensor +func (ts Tensor) Drop() (err error) { + lib.AtFree(ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return nil +} + +// MustDrop drops the tensor. It will be panic if error +func (ts Tensor) MustDrop() { + if err := ts.Drop(); err != nil { + log.Fatal(err) + } +} diff --git a/tensor/tensor_test.go b/tensor/tensor_test.go index b90f9b8..a218185 100644 --- a/tensor/tensor_test.go +++ b/tensor/tensor_test.go @@ -8,12 +8,26 @@ import ( ts "github.com/sugarme/gotch/tensor" ) +func TestTensorInit(t *testing.T) { + tensor := ts.MustArange1(ts.IntScalar(1), ts.IntScalar(5), gotch.Int64, gotch.CPU) + + tensor.Print() + + want := []float64{1, 2, 3, 4} + got := tensor.Float64Values() + + if !reflect.DeepEqual(want, got) { + t.Errorf("Expected tensor values: %v\n", want) + t.Errorf("Got tensor values: %v\n", got) + } +} + func TestInplaceAssign(t *testing.T) { tensor := ts.MustOfSlice([]int64{3, 1, 4, 1, 5}) - tensor.Add1_(ts.IntScalar(1)) - tensor.Mul1_(ts.IntScalar(2)) - tensor.Sub1_(ts.IntScalar(1)) + tensor.MustAdd1_(ts.IntScalar(1)) + tensor.MustMul1_(ts.IntScalar(2)) + tensor.MustSub1_(ts.IntScalar(1)) want := []int64{7, 3, 9, 3, 11} got := tensor.Vals() @@ -83,5 +97,3 @@ func TestIter(t *testing.T) { t.Errorf("Got tensor values: %v\n", got1) } } - -// TODO: more tests diff --git a/vision/alexnet.go b/vision/alexnet.go index 436f13f..556acc4 100644 --- a/vision/alexnet.go +++ b/vision/alexnet.go @@ -17,7 +17,7 @@ func anConv2d(p nn.Path, cIn, cOut, ksize, padding, stride int64) (retVal nn.Con } func anMaxPool2d(xs ts.Tensor, ksize, stride int64) (retVal ts.Tensor) { - return xs.MustMaxPool2D([]int64{ksize, ksize}, []int64{stride, stride}, []int64{0, 0}, []int64{1, 1}, false, false) + return xs.MustMaxPool2d([]int64{ksize, ksize}, []int64{stride, stride}, []int64{0, 0}, []int64{1, 1}, false, false) } func features(p nn.Path) (retVal ts.ModuleT) { @@ -68,7 +68,7 @@ func classifier(p nn.Path, nclasses int64) (retVal ts.ModuleT) { seq := nn.SeqT() seq.AddFnT(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - return xs.MustDropout(0.5, train, false) + return ts.MustDropout(xs, 0.5, train) })) seq.Add(nn.NewLinear(p.Sub("1"), 256*6*6, 4096, nn.DefaultLinearConfig())) @@ -78,7 +78,7 @@ func classifier(p nn.Path, nclasses int64) (retVal 
ts.ModuleT) { })) seq.AddFnT(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - return xs.MustDropout(0.5, train, false) + return ts.MustDropout(xs, 0.5, train) })) seq.Add(nn.NewLinear(p.Sub("4"), 4096, 4096, nn.DefaultLinearConfig())) @@ -98,7 +98,7 @@ func AlexNet(p nn.Path, nclasses int64) (retVal ts.ModuleT) { seq.Add(features(p.Sub("features"))) seq.AddFn(nn.NewFunc(func(xs ts.Tensor) ts.Tensor { - tmp1 := xs.MustAdaptiveAvgPool2D([]int64{6, 6}) + tmp1 := xs.MustAdaptiveAvgPool2d([]int64{6, 6}, false) res := tmp1.FlatView() tmp1.MustDrop() return res diff --git a/vision/cifar.go b/vision/cifar.go index bf8a16c..2d6cfb6 100644 --- a/vision/cifar.go +++ b/vision/cifar.go @@ -45,8 +45,8 @@ func readFile(filename string) (imagesTs ts.Tensor, labelsTs ts.Tensor) { log.Fatal(err) } - images := ts.MustZeros([]int64{samplesPerFile, cfC, cfH, cfW}, gotch.Float.CInt(), gotch.CPU.CInt()) - labels := ts.MustZeros([]int64{samplesPerFile}, gotch.Int64.CInt(), gotch.CPU.CInt()) + images := ts.MustZeros([]int64{samplesPerFile, cfC, cfH, cfW}, gotch.Float, gotch.CPU) + labels := ts.MustZeros([]int64{samplesPerFile}, gotch.Int64, gotch.CPU) for idx := 0; idx < int(samplesPerFile); idx++ { contentOffset := int(bytesPerImage) * idx @@ -101,8 +101,8 @@ func CFLoadDir(dir string) (retVal Dataset) { } return Dataset{ - TrainImages: ts.MustCat(trainImages, 0, true), - TrainLabels: ts.MustCat(trainLabels, 0, true), + TrainImages: ts.MustCat(trainImages, 0), + TrainLabels: ts.MustCat(trainLabels, 0), TestImages: testImages, TestLabels: testLabels, Labels: 10, diff --git a/vision/dataset.go b/vision/dataset.go index beadd7d..1a4d211 100644 --- a/vision/dataset.go +++ b/vision/dataset.go @@ -57,7 +57,7 @@ func RandomFlip(t ts.Tensor) (retVal ts.Tensor) { if rand.Float64() == 1.0 { src = tView } else { - src = tView.MustFlip([]int64{2}) + src = tView.MustFlip([]int64{2}, false) } tView.MustDrop() @@ -82,7 +82,7 @@ func RandomCrop(t ts.Tensor, pad int64) (retVal ts.Tensor) { szH := size[2] szW := size[3] - padded := t.MustReflectionPad2d([]int64{pad, pad, pad, pad}) + padded := t.MustReflectionPad2d([]int64{pad, pad, pad, pad}, false) output, err := t.ZerosLike(false) if err != nil { log.Fatal(err) diff --git a/vision/densenet.go b/vision/densenet.go index 9193f60..c9ba4fe 100644 --- a/vision/densenet.go +++ b/vision/densenet.go @@ -39,7 +39,7 @@ func denseLayer(p nn.Path, cIn, bnSize, growth int64) (retVal ts.ModuleT) { ys := ys5.Apply(conv2) ys5.MustDrop() - res := ts.MustCat([]ts.Tensor{xs, ys}, 1, false) + res := ts.MustCat([]ts.Tensor{xs, ys}, 1) ys.MustDrop() return res @@ -84,7 +84,7 @@ func densenet(p nn.Path, cIn, cOut, bnSize int64, blockConfig []int64, growth in seq.AddFn(nn.NewFunc(func(xs ts.Tensor) ts.Tensor { tmp := xs.MustRelu(false) - return tmp.MustMaxPool2D([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) + return tmp.MustMaxPool2d([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) })) nfeat := cIn @@ -103,7 +103,7 @@ func densenet(p nn.Path, cIn, cOut, bnSize int64, blockConfig []int64, growth in seq.AddFn(nn.NewFunc(func(xs ts.Tensor) ts.Tensor { tmp1 := xs.MustRelu(false) - tmp2 := tmp1.MustAvgPool2D([]int64{7, 7}, []int64{1, 1}, []int64{0, 0}, false, true, 1, true) + tmp2 := tmp1.MustAvgPool2d([]int64{7, 7}, []int64{1, 1}, []int64{0, 0}, false, true, 1, true) res := tmp2.FlatView() tmp2.MustDrop() return res diff --git a/vision/efficientnet.go b/vision/efficientnet.go index e209aaa..0aa482a 100644 --- a/vision/efficientnet.go +++ 
b/vision/efficientnet.go @@ -218,7 +218,7 @@ func block(p nn.Path, args BlockArgs) (retVal ts.ModuleT) { if args.SeRatio == 0 { ys4 = ys3 } else { - tmp1 := ys3.MustAdaptiveAvgPool2D([]int64{1, 1}) + tmp1 := ys3.MustAdaptiveAvgPool2d([]int64{1, 1}, false) tmp2 := tmp1.ApplyT(se, train) tmp1.MustDrop() tmp3 := tmp2.MustSigmoid(true) @@ -288,7 +288,7 @@ func efficientnet(p nn.Path, params params, nclasses int64) (retVal ts.ModuleT) classifier := nn.SeqT() classifier.AddFnT(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - return xs.MustDropout(0.2, train, false) + return ts.MustDropout(xs, 0.2, train) })) classifier.Add(nn.NewLinear(p.Sub("_fc"), outC, nclasses, nn.DefaultLinearConfig())) @@ -306,7 +306,7 @@ func efficientnet(p nn.Path, params params, nclasses int64) (retVal ts.ModuleT) tmp5.MustDrop() tmp7 := tmp6.Swish() tmp6.MustDrop() - tmp8 := tmp7.MustAdaptiveAvgPool2D([]int64{1, 1}) + tmp8 := tmp7.MustAdaptiveAvgPool2d([]int64{1, 1}, false) tmp7.MustDrop() tmp9 := tmp8.MustSqueeze1(-1, true) tmp10 := tmp9.MustSqueeze1(-1, true) diff --git a/vision/imagenet.go b/vision/imagenet.go index f27431b..d4c1339 100644 --- a/vision/imagenet.go +++ b/vision/imagenet.go @@ -236,7 +236,7 @@ func (in ImageNet) LoadFromDir(path string) (retVal Dataset, err error) { ntrainTs := trainTs.MustSize()[0] trainImages = append(trainImages, trainTs) - trainLabelOnes := ts.MustOnes([]int64{ntrainTs}, gotch.Int64.CInt(), gotch.CPU.CInt()) + trainLabelOnes := ts.MustOnes([]int64{ntrainTs}, gotch.Int64, gotch.CPU) trainLabels = append(trainLabels, trainLabelOnes.MustMul1(ts.IntScalar(labelIndex), true)) // test @@ -249,15 +249,15 @@ func (in ImageNet) LoadFromDir(path string) (retVal Dataset, err error) { ntestTs := testTs.MustSize()[0] testImages = append(testImages, testTs) - testLabelOnes := ts.MustOnes([]int64{ntestTs}, gotch.Int64.CInt(), gotch.CPU.CInt()) + testLabelOnes := ts.MustOnes([]int64{ntestTs}, gotch.Int64, gotch.CPU) testLabels = append(testLabels, testLabelOnes.MustMul1(ts.IntScalar(labelIndex), true)) } return Dataset{ - TrainImages: ts.MustCat(trainImages, 0, true), - TrainLabels: ts.MustCat(trainLabels, 0, true), - TestImages: ts.MustCat(testImages, 0, true), - TestLabels: ts.MustCat(testLabels, 0, true), + TrainImages: ts.MustCat(trainImages, 0), + TrainLabels: ts.MustCat(trainLabels, 0), + TestImages: ts.MustCat(testImages, 0), + TestLabels: ts.MustCat(testLabels, 0), Labels: int64(len(classes)), }, nil } @@ -1301,8 +1301,8 @@ func (in ImageNet) Top(input ts.Tensor, k int64) (retVal []TopItem) { var topItems []TopItem - vals := valsTs.Values() - idxs := idxsTs.Values() + vals := valsTs.Float64Values() + idxs := idxsTs.Float64Values() for i := 0; i < int(k); i++ { val := vals[i] diff --git a/vision/inception.go b/vision/inception.go index cb82260..b44e755 100644 --- a/vision/inception.go +++ b/vision/inception.go @@ -53,7 +53,7 @@ func convBn2(p nn.Path, cIn, cOut int64, ksize []int64, pad []int64) (retVal ts. 
} func inMaxPool2D(xs ts.Tensor, ksize, stride int64) (retVal ts.Tensor) { - return xs.MustMaxPool2D([]int64{ksize, ksize}, []int64{stride, stride}, []int64{0, 0}, []int64{1, 1}, false, false) + return xs.MustMaxPool2d([]int64{ksize, ksize}, []int64{stride, stride}, []int64{0, 0}, []int64{1, 1}, false, false) } func inceptionA(p nn.Path, cIn, cPool int64) (retVal ts.ModuleT) { @@ -78,10 +78,10 @@ func inceptionA(p nn.Path, cIn, cPool int64) (retVal ts.ModuleT) { b3Ts := b3Tmp2.ApplyT(b33, train) b3Tmp2.MustDrop() - bpoolTmp := xs.MustAvgPool2D([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false) + bpoolTmp := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false) bpoolTs := bpoolTmp.ApplyT(bpool, train) - res := ts.MustCat([]ts.Tensor{b1Ts, b2Ts, b3Ts, bpoolTs}, 1, true) + res := ts.MustCat([]ts.Tensor{b1Ts, b2Ts, b3Ts, bpoolTs}, 1) return res }) @@ -104,7 +104,7 @@ func inceptionB(p nn.Path, cIn int64) (retVal ts.ModuleT) { bpoolTs := inMaxPool2D(xs, 3, 2) - res := ts.MustCat([]ts.Tensor{b1Ts, b2Ts, bpoolTs}, 1, true) + res := ts.MustCat([]ts.Tensor{b1Ts, b2Ts, bpoolTs}, 1) return res }) @@ -145,10 +145,10 @@ func inceptionC(p nn.Path, cIn int64, c7 int64) (retVal ts.ModuleT) { b3Ts := b3Tmp4.ApplyT(b35, train) b3Tmp4.MustDrop() - bpTmp1 := xs.MustAvgPool2D([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false) + bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false) bpoolTs := bpTmp1.ApplyT(bpool, train) - res = ts.MustCat([]ts.Tensor{b1Ts, b2Ts, b3Ts, bpoolTs}, 1, true) + res = ts.MustCat([]ts.Tensor{b1Ts, b2Ts, b3Ts, bpoolTs}, 1) return res @@ -180,7 +180,7 @@ func inceptionD(p nn.Path, cIn int64) (retVal ts.ModuleT) { bpoolTs := inMaxPool2D(xs, 3, 2) - return ts.MustCat([]ts.Tensor{b1Ts, b2Ts, bpoolTs}, 1, true) + return ts.MustCat([]ts.Tensor{b1Ts, b2Ts, bpoolTs}, 1) }) } @@ -205,19 +205,19 @@ func inceptionE(p nn.Path, cIn int64) (retVal ts.ModuleT) { b2Tmp := xs.ApplyT(b21, train) b2aTs := b2Tmp.ApplyT(b22a, train) b2bTs := b2Tmp.ApplyT(b22b, train) - b2Ts := ts.MustCat([]ts.Tensor{b2aTs, b2bTs}, 1, true) + b2Ts := ts.MustCat([]ts.Tensor{b2aTs, b2bTs}, 1) b3Tmp1 := xs.ApplyT(b31, train) b3Tmp2 := b3Tmp1.ApplyT(b32, train) b3Tmp1.MustDrop() b3aTs := b3Tmp2.ApplyT(b33a, train) b3bTs := b3Tmp2.ApplyT(b33b, train) - b3Ts := ts.MustCat([]ts.Tensor{b3aTs, b3bTs}, 1, true) + b3Ts := ts.MustCat([]ts.Tensor{b3aTs, b3bTs}, 1) - bpTmp1 := xs.MustAvgPool2D([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false) + bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false) bpoolTs := bpTmp1.ApplyT(bpool, train) - return ts.MustCat([]ts.Tensor{b1Ts, b2Ts, b3Ts, bpoolTs}, 1, true) + return ts.MustCat([]ts.Tensor{b1Ts, b2Ts, b3Ts, bpoolTs}, 1) }) } @@ -263,10 +263,10 @@ func InceptionV3(p nn.Path, nclasses int64) (retVal ts.ModuleT) { seq.Add(inceptionE(p.Sub("Mixed_7c"), 2048)) seq.AddFnT(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - tmp1 := xs.MustAdaptiveAvgPool2D([]int64{1, 1}) - tmp2 := tmp1.MustDropout(0.5, train, true) + tmp1 := xs.MustAdaptiveAvgPool2d([]int64{1, 1}, false) + tmp2 := ts.MustDropout(tmp1, 0.5, train) + tmp1.MustDrop() res := tmp2.FlatView() - tmp2.MustDrop() return res })) diff --git a/vision/mobilenet.go b/vision/mobilenet.go index 100b6a7..f433f6f 100644 --- a/vision/mobilenet.go +++ b/vision/mobilenet.go @@ -109,7 +109,7 @@ func MobileNetV2(p nn.Path, nclasses int64) (retVal ts.ModuleT) { classifier := nn.SeqT() 
classifier.AddFnT(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - return xs.MustDropout(0.5, train, false) + return ts.MustDropout(xs, 0.5, train) })) classifier.Add(nn.NewLinear(cp.Sub("1"), 1280, nclasses, nn.DefaultLinearConfig())) diff --git a/vision/resnet.go b/vision/resnet.go index 3748fda..56493db 100644 --- a/vision/resnet.go +++ b/vision/resnet.go @@ -92,7 +92,7 @@ func resnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVal nn.FuncT bn1 := c1.ApplyT(bn1, train) c1.MustDrop() relu := bn1.MustRelu(true) - maxpool := relu.MustMaxPool2D([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) + maxpool := relu.MustMaxPool2d([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) l1 := maxpool.ApplyT(layer1, train) l2 := l1.ApplyT(layer2, train) l1.MustDrop() @@ -100,7 +100,7 @@ func resnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVal nn.FuncT l2.MustDrop() l4 := l3.ApplyT(layer4, train) l3.MustDrop() - avgpool := l4.MustAdaptiveAvgPool2D([]int64{1, 1}) + avgpool := l4.MustAdaptiveAvgPool2d([]int64{1, 1}, false) l4.MustDrop() fv := avgpool.FlatView() avgpool.MustDrop() @@ -118,7 +118,7 @@ func resnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVal nn.FuncT bn1 := c1.ApplyT(bn1, train) c1.MustDrop() relu := bn1.MustRelu(true) - maxpool := relu.MustMaxPool2D([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) + maxpool := relu.MustMaxPool2d([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) l1 := maxpool.ApplyT(layer1, train) maxpool.MustDrop() l2 := l1.ApplyT(layer2, train) @@ -127,7 +127,7 @@ func resnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVal nn.FuncT l2.MustDrop() l4 := l3.ApplyT(layer4, train) l3.MustDrop() - avgpool := l4.MustAdaptiveAvgPool2D([]int64{1, 1}) + avgpool := l4.MustAdaptiveAvgPool2d([]int64{1, 1}, false) l4.MustDrop() retVal = avgpool.FlatView() avgpool.MustDrop() @@ -215,7 +215,7 @@ func bottleneckResnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVa bn1 := c1.ApplyT(bn1, train) c1.MustDrop() relu := bn1.MustRelu(true) - maxpool := relu.MustMaxPool2D([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) + maxpool := relu.MustMaxPool2d([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) l1 := maxpool.ApplyT(layer1, train) l2 := l1.ApplyT(layer2, train) l1.MustDrop() @@ -223,7 +223,7 @@ func bottleneckResnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVa l2.MustDrop() l4 := l3.ApplyT(layer4, train) l3.MustDrop() - avgpool := l4.MustAdaptiveAvgPool2D([]int64{1, 1}) + avgpool := l4.MustAdaptiveAvgPool2d([]int64{1, 1}, false) l4.MustDrop() fv := avgpool.FlatView() avgpool.MustDrop() @@ -239,7 +239,7 @@ func bottleneckResnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVa bn1 := c1.ApplyT(bn1, train) c1.MustDrop() relu := bn1.MustRelu(true) - maxpool := relu.MustMaxPool2D([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) + maxpool := relu.MustMaxPool2d([]int64{3, 3}, []int64{2, 2}, []int64{1, 1}, []int64{1, 1}, false, true) l1 := maxpool.ApplyT(layer1, train) maxpool.MustDrop() l2 := l1.ApplyT(layer2, train) @@ -248,7 +248,7 @@ func bottleneckResnet(path nn.Path, nclasses int64, c1, c2, c3, c4 int64) (retVa l2.MustDrop() l4 := l3.ApplyT(layer4, train) l3.MustDrop() - avgpool := l4.MustAdaptiveAvgPool2D([]int64{1, 1}) + avgpool := l4.MustAdaptiveAvgPool2d([]int64{1, 1}, false) l4.MustDrop() retVal = avgpool.FlatView() avgpool.MustDrop() 
diff --git a/vision/squeezenet.go b/vision/squeezenet.go index cfcb5ed..f0a4322 100644 --- a/vision/squeezenet.go +++ b/vision/squeezenet.go @@ -8,7 +8,7 @@ import ( ) func snMaxPool2D(xs ts.Tensor) (retVal ts.Tensor) { - return xs.MustMaxPool2D([]int64{3, 3}, []int64{2, 2}, []int64{0, 0}, []int64{1, 1}, true, false) + return xs.MustMaxPool2d([]int64{3, 3}, []int64{2, 2}, []int64{0, 0}, []int64{1, 1}, true, false) } func fire(p nn.Path, cIn int64, cSqueeze int64, cExp1 int64, cExp3 int64) (retVal ts.ModuleT) { @@ -31,7 +31,7 @@ func fire(p nn.Path, cIn int64, cSqueeze int64, cExp1 int64, cExp3 int64) (retVa exp3Tmp := tmp2.Apply(exp3) exp3Ts := exp3Tmp.MustRelu(true) - return ts.MustCat([]ts.Tensor{exp1Ts, exp3Ts}, 1, true) + return ts.MustCat([]ts.Tensor{exp1Ts, exp3Ts}, 1) }) } @@ -119,14 +119,14 @@ func squeezenet(p nn.Path, v1_0 bool, nclasses int64) (retVal ts.ModuleT) { } features.AddFnT(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - return xs.MustDropout(0.5, train, false) + return ts.MustDropout(xs, 0.5, train) })) features.Add(nn.NewConv2D(cp.Sub("1"), 512, nclasses, 1, finalConvConfig)) features.AddFn(nn.NewFunc(func(xs ts.Tensor) ts.Tensor { tmp1 := xs.MustRelu(false) - tmp2 := tmp1.MustAdaptiveAvgPool2D([]int64{1, 1}) + tmp2 := tmp1.MustAdaptiveAvgPool2d([]int64{1, 1}, false) tmp1.MustDrop() res := tmp2.FlatView() tmp2.MustDrop() diff --git a/vision/vgg.go b/vision/vgg.go index 5d4cc8a..e2b7d62 100644 --- a/vision/vgg.go +++ b/vision/vgg.go @@ -101,7 +101,7 @@ func vgg(path nn.Path, config [][]int64, nclasses int64, batchNorm bool) nn.Sequ })) seq.AddFn(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - return xs.MustDropout(0.5, train, false) + return ts.MustDropout(xs, 0.5, train) })) seq.Add(nn.NewLinear(c.Sub(fmt.Sprint("3")), 4096, 4096, nn.DefaultLinearConfig())) @@ -111,7 +111,7 @@ func vgg(path nn.Path, config [][]int64, nclasses int64, batchNorm bool) nn.Sequ })) seq.AddFn(nn.NewFuncT(func(xs ts.Tensor, train bool) ts.Tensor { - return xs.MustDropout(0.5, train, false) + return ts.MustDropout(xs, 0.5, train) })) seq.Add(nn.NewLinear(c.Sub(fmt.Sprint("6")), 4096, nclasses, nn.DefaultLinearConfig()))
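The generated bindings above follow one pattern throughout: each op returns (Tensor, error), in-place variants end in an underscore, and a trailing del flag asks the call to drop (free) the receiver. Below is a minimal usage sketch of that pattern, assuming the github.com/sugarme/gotch and github.com/sugarme/gotch/tensor import paths and the signatures exactly as generated above; it is an illustration, not part of the patch.

package main

import (
	"fmt"
	"log"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// DType and Device are now passed directly (gotch.Float, gotch.CPU)
	// rather than through their CInt() form.
	x := ts.MustOnes([]int64{3, 3}, gotch.Float, gotch.CPU)

	// Generated ops return (Tensor, error); del == true frees x here.
	lower, err := x.Tril(0, true)
	if err != nil {
		log.Fatal(err)
	}
	defer lower.MustDrop()

	// Values() was renamed to Float64Values().
	fmt.Println(lower.Float64Values())
}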
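The tensor_test.go hunk shows the matching change for in-place element ops: the bare Add1_/Mul1_/Sub1_ calls become Must-prefixed variants that panic on error. A short sketch under the same import assumptions, mirroring the updated test:

package main

import (
	"fmt"

	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	t := ts.MustOfSlice([]int64{3, 1, 4, 1, 5})

	// In-place ops: Add1_ -> MustAdd1_, Mul1_ -> MustMul1_, Sub1_ -> MustSub1_.
	t.MustAdd1_(ts.IntScalar(1))
	t.MustMul1_(ts.IntScalar(2))
	t.MustSub1_(ts.IntScalar(1))

	fmt.Println(t.Float64Values()) // [7 3 9 3 11]
	t.MustDrop()
}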
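The vision call sites change in lock-step with the generated names: MustMaxPool2D and MustAdaptiveAvgPool2D take the lowercase-d spelling, and dropout moves from a tensor method to the package-level ts.MustDropout helper. The sketch below shows the new call shapes on a dummy NCHW tensor; the argument order copies the calls in the diff, and the meaning of the trailing booleans (ceil-mode and the del flag) is an assumption, not confirmed by the patch.

package main

import (
	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Dummy batch: 1 image, 3 channels, 8x8.
	xs := ts.MustZeros([]int64{1, 3, 8, 8}, gotch.Float, gotch.CPU)

	// ksize, stride, padding, dilation, then two booleans,
	// assumed to be ceil-mode and del (del == true frees xs).
	pooled := xs.MustMaxPool2d([]int64{2, 2}, []int64{2, 2}, []int64{0, 0}, []int64{1, 1}, false, true)

	// Output size, plus what appears to be the del flag (frees pooled).
	avg := pooled.MustAdaptiveAvgPool2d([]int64{1, 1}, true)

	// Dropout is now a package-level helper taking the input tensor first;
	// it does not appear to take del, so the caller drops avg explicitly.
	dropped := ts.MustDropout(avg, 0.5, true)
	avg.MustDrop()

	dropped.Print()
	dropped.MustDrop()
}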