diff --git a/CHANGELOG.md b/CHANGELOG.md index 81a16ad..974388e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,3 +41,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Added drawing image label at `example/yolo` example - Added some example images and README files for `example/yolo` and `example/neural-style-transfer` + +## [0.3.0] + +### Changed +- Updated to Pytorch C++ APIs v1.7.0 +- Switched back to `lib.AtoAddParametersOld` as `ato_add_parameters` has not been implemented correctly. Using the updated API causes the optimizer to stop working. diff --git a/README.md b/README.md index d05ea1d..ad57dfe 100644 --- a/README.md +++ b/README.md @@ -5,32 +5,32 @@ - **GoTch** is a C++ Libtorch Go binding for developing and implementing deep learning projects in Go. - This package is to create a thin wrapper of Libtorch to make use of its tensor APIs and CUDA support while implementing as much idiomatic Go as possible. -- There are about **1129** auto-generated tensor APIs. +- There are about **1404** auto-generated tensor APIs. ## Dependencies -- **Libtorch** C++ v1.5.0 library of [Pytorch](https://pytorch.org/) +- **Libtorch** C++ v1.7.0 library of [Pytorch](https://pytorch.org/) ## Installation - **CPU** - Default values: `LIBTORCH_VER=1.5.1` and `GOTCH_VER=v0.1.7` + Default values: `LIBTORCH_VER=1.7.0` and `GOTCH_VER=v0.3.0` ```bash - go get -u github.com/sugarme/gotch@v0.1.7 - bash ${GOPATH}/pkg/mod/github.com/sugarme/gotch@v0.1.7/setup-cpu.sh + go get -u github.com/sugarme/gotch@v0.3.0 + bash ${GOPATH}/pkg/mod/github.com/sugarme/gotch@v0.3.0/setup-cpu.sh ``` - **GPU** - Default values: `LIBTORCH_VER=1.5.1`, `CUDA_VER=10.1` and `GOTCH_VER=v0.1.7` + Default values: `LIBTORCH_VER=1.7.0`, `CUDA_VER=10.1` and `GOTCH_VER=v0.3.0` ```bash - go get -u github.com/sugarme/gotch@v0.1.7 - bash ${GOPATH}/pkg/mod/github.com/sugarme/gotch@v0.1.7/setup-gpu.sh + go get -u github.com/sugarme/gotch@v0.3.0 + bash ${GOPATH}/pkg/mod/github.com/sugarme/gotch@v0.3.0/setup-gpu.sh ``` diff --git a/example/char-rnn/main.go b/example/char-rnn/main.go index b1af728..199271c 100644 --- a/example/char-rnn/main.go +++ b/example/char-rnn/main.go @@ -116,9 +116,9 @@ func main() { sumLoss += loss.Float64Values()[0] cntLoss += 1.0 - batchTs.MustDrop() - batchNarrow.MustDrop() - xsOnehotTmp.MustDrop() + // batchTs.MustDrop() + // batchNarrow.MustDrop() + // xsOnehotTmp.MustDrop() xsOnehot.MustDrop() ys.MustDrop() lstmOut.MustDrop() diff --git a/example/mnist/cnn.go b/example/mnist/cnn.go index c8f80b5..1e5aaa8 100644 --- a/example/mnist/cnn.go +++ b/example/mnist/cnn.go @@ -117,21 +117,21 @@ func runCNN1() { logits := net.ForwardT(bImages, true) loss := logits.CrossEntropyForLogits(bLabels) - // loss = loss.MustSetRequiresGrad(true) + // loss = loss.MustSetRequiresGrad(true, false) opt.BackwardStep(loss) epocLoss = loss.MustShallowClone() epocLoss.Detach_() - // fmt.Printf("completed \t %v batches\t %.2f\n", i, loss.Values()[0]) + // fmt.Printf("completed \t %v batches\t %.2f\n", i, loss.Float64Values()[0]) bImages.MustDrop() bLabels.MustDrop() } - vs.Freeze() + // vs.Freeze() testAccuracy := nn.BatchAccuracyForLogits(vs, net, testImages, testLabels, vs.Device(), 1024) - vs.Unfreeze() + // vs.Unfreeze() fmt.Printf("Epoch: %v\t Loss: %.2f \t Test accuracy: %.2f%%\n", epoch, epocLoss.Float64Values()[0], testAccuracy*100.0) if testAccuracy > bestAccuracy { bestAccuracy = testAccuracy diff --git a/example/mnist/linear.go b/example/mnist/linear.go index 2f19266..7b2348a 100644
--- a/example/mnist/linear.go +++ b/example/mnist/linear.go @@ -45,7 +45,7 @@ func runLinear() { }) testLogits := ds.TestImages.MustMm(ws, false).MustAdd(bs, true) - testAccuracy := testLogits.MustArgmax(-1, false, true).MustEq1(ds.TestLabels, true).MustTotype(gotch.Float, true).MustMean(gotch.Float, true).MustView([]int64{-1}, true).MustFloat64Value([]int64{0}) + testAccuracy := testLogits.MustArgmax([]int64{-1}, false, true).MustEq1(ds.TestLabels, true).MustTotype(gotch.Float, true).MustMean(gotch.Float, true).MustView([]int64{-1}, true).MustFloat64Value([]int64{0}) fmt.Printf("Epoch: %v - Loss: %.3f - Test accuracy: %.2f%%\n", epoch, loss.Float64Values()[0], testAccuracy*100) diff --git a/example/tensor-io/file.pt b/example/tensor-io/file.pt index 382a733..0df0ebd 100644 Binary files a/example/tensor-io/file.pt and b/example/tensor-io/file.pt differ diff --git a/example/tensor-io/file_multi.pt b/example/tensor-io/file_multi.pt index c56860e..8435a5c 100644 Binary files a/example/tensor-io/file_multi.pt and b/example/tensor-io/file_multi.pt differ diff --git a/example/tensor-io/mnist-tensor-saved.png b/example/tensor-io/mnist-tensor-saved.png index 390489b..6e08e9f 100644 Binary files a/example/tensor-io/mnist-tensor-saved.png and b/example/tensor-io/mnist-tensor-saved.png differ diff --git a/example/yolo/darknet.go b/example/yolo/darknet.go index a3458a6..0035c61 100644 --- a/example/yolo/darknet.go +++ b/example/yolo/darknet.go @@ -271,7 +271,7 @@ func upsample(prevChannels int64) (retVal1 int64, retVal2 interface{}) { h := res[2] w := res[3] - return xs.MustUpsampleNearest2d([]int64{h * 2, w * 2}, 2.0, 2.0, false) + return xs.MustUpsampleNearest2d([]int64{h * 2, w * 2}, []float64{2.0}, []float64{2.0}, false) }) return prevChannels, Layer{Val: layer} diff --git a/gen/gen.ml b/gen/gen.ml index d48d373..43be656 100644 --- a/gen/gen.ml +++ b/gen/gen.ml @@ -28,7 +28,8 @@ let excluded_functions = ; "_amp_non_finite_check_and_unscale_" ; "_cummin_helper" ; "_cummax_helper" - ; "retain_grad" ] + ; "retain_grad" + ; "_validate_sparse_coo_tensor_args" ] let no_tensor_options = Set.of_list @@ -47,7 +48,7 @@ let no_tensor_options = * (module String) * ["add"; "add_"; "div"; "div_"; "mul"; "mul_"; "sub"; "sub_"; "nll_loss"] * *) -let excluded_prefixes = ["_thnn_"; "_th_"; "thnn_"; "th_"] +let excluded_prefixes = ["_thnn_"; "_th_"; "thnn_"; "th_"; "_foreach"] let excluded_suffixes = ["_forward"; "_forward_out"] @@ -79,7 +80,9 @@ module Func = struct type arg_type = | Bool | Int64 + | Int64Option | Double + | DoubleOption | Tensor | TensorOption | IntList @@ -104,8 +107,8 @@ module Func = struct let arg_type_of_string str ~is_nullable = match String.lowercase str with | "bool" -> Some Bool - | "int64_t" -> Some Int64 - | "double" -> Some Double + | "int64_t" -> Some (if is_nullable then Int64Option else Int64) + | "double" -> Some (if is_nullable then DoubleOption else Double) | "booltensor" | "indextensor" | "tensor" -> Some (if is_nullable then TensorOption else Tensor) | "tensoroptions" -> Some TensorOptions @@ -127,6 +130,10 @@ module Func = struct | TensorOptions -> Printf.sprintf "int %s_kind, int %s_device" arg_name arg_name | String -> Printf.sprintf "char* %s_ptr, int %s_len" arg_name arg_name + | Int64Option -> + Printf.sprintf "int64_t %s_v, uint8_t %s_null" arg_name arg_name + | DoubleOption -> + Printf.sprintf "double %s_v, uint8_t %s_null" arg_name arg_name | otherwise -> let simple_type_cstring = match otherwise with @@ -138,7 +145,9 @@ module Func = struct | ScalarType -> "int" | 
Device -> "int" | Scalar -> "scalar" - | String | IntList | TensorList | TensorOptions -> assert false + | Int64Option | DoubleOption | String | IntList | TensorList + |TensorOptions -> + assert false in Printf.sprintf "%s %s" simple_type_cstring arg_name ) |> String.concat ~sep:", " @@ -162,6 +171,14 @@ module Func = struct Printf.sprintf "at::device(device_of_int(%s_device)).dtype(at::ScalarType(%s_kind))" arg_name arg_name + | Int64Option -> + Printf.sprintf + "%s_null ? c10::nullopt : c10::optional(%s_v)" arg_name + arg_name + | DoubleOption -> + Printf.sprintf + "%s_null ? c10::nullopt : c10::optional(%s_v)" arg_name + arg_name | ScalarType -> Printf.sprintf "at::ScalarType(%s)" arg_name | Device -> Printf.sprintf "device_of_int(%s)" arg_name | _ -> arg_name ) @@ -229,6 +246,8 @@ module Func = struct | String -> single_param "string" | IntList -> Printf.sprintf "%sData []int64, %sLen int" an an | TensorList -> Printf.sprintf "%sData []Ctensor, %sLen int" an an + | Int64Option -> Printf.sprintf "%sVal int64, %sNull int" an an + | DoubleOption -> Printf.sprintf "%sVal float64, %sNull int" an an | TensorOptions -> Printf.sprintf "%sKind int32, %sDevice int32" an an ) |> String.concat ~sep:", " @@ -250,6 +269,8 @@ module Func = struct | String -> Printf.sprintf "c%s, c%sLen" an an | IntList -> Printf.sprintf "c%sDataPtr, c%sLen" an an | TensorList -> Printf.sprintf "c%sDataPtr, c%sLen" an an + | Int64Option -> Printf.sprintf "c%sVal, c%sNull" an an + | DoubleOption -> Printf.sprintf "c%sVal, c%sNull" an an | TensorOptions -> Printf.sprintf "c%sKind, c%sDevice" an an ) |> String.concat ~sep:", " @@ -291,6 +312,18 @@ module Func = struct c%sDataPtr := (*Ctensor)(unsafe.Pointer(&%sData[0]))\n\ c%sLen := *(*C.int)(unsafe.Pointer(&%sLen))" an an an an + | Int64Option -> + Printf.sprintf + "\n\ + c%sVal := *(*C.int64_t)(unsafe.Pointer(&%sVal))\n\ + c%sNull := *(*C.uint8_t)(unsafe.Pointer(&%sNull))" + an an an an + | DoubleOption -> + Printf.sprintf + "\n\ + c%sVal := *(*C.double)(unsafe.Pointer(&%sVal))\n\ + c%sNull := *(*C.uint8_t)(unsafe.Pointer(&%sNull))" + an an an an | TensorOptions -> Printf.sprintf "\n\ @@ -356,6 +389,8 @@ module Func = struct | TensorOptions -> "gotch.KindDevice" | Scalar -> "*Scalar" | ScalarType -> "gotch.DType" + | Int64Option -> "[]int64" + | DoubleOption -> "[]float64" | Device -> "gotch.Device" in match arg.arg_type with @@ -436,6 +471,8 @@ module Func = struct | String -> Printf.sprintf "%s" name | IntList -> Printf.sprintf "%s, len(%s)" name name | TensorList -> Printf.sprintf "c%s, len(c%s)" name name + | Int64Option -> Printf.sprintf "c%sVal, c%sNull" name name + | DoubleOption -> Printf.sprintf "c%sVal, c%sNull" name name | TensorOption -> Printf.sprintf "%s.ctensor" name | _ -> name ) |> String.concat ~sep:", " @@ -456,6 +493,24 @@ module Func = struct | Device -> "" | String -> "" | IntList -> "" + | Int64Option -> + Printf.sprintf + "var c%sVal int64 = 0\n\ + \ var c%sNull int = 1\n\ + \ if len(%s) > 0 {\n\ + \ c%sVal = %s[0]\n\ + \ c%sNull = 0\n\ + \ }\n" + an an an an an an + | DoubleOption -> + Printf.sprintf + "var c%sVal float64 = 0.0\n\ + \ var c%sNull int = 1\n\ + \ if len(%s) > 0 {\n\ + \ c%sVal = %s[0]\n\ + \ c%sNull = 0\n\ + \ }\n" + an an an an an an | TensorList -> Printf.sprintf " var c%s []lib.Ctensor\n\ @@ -687,7 +742,16 @@ let write_wrapper funcs filename = ; "Split" ; "SplitWithSizes" ; "Unbind" - ; "Where" ] + ; "Where" + ; "Atleast1d1" + ; "Atleast2d1" + ; "Atleast3d1" + ; "Dequantize1" + ; "QuantizePerTensor1" + ; "UnsafeChunk" + ; 
"UnsafeSplit" + ; "UnsafeSplitWithSizes" + ; "AlignTensors" ] in if List.exists excluded_funcs ~f:(fun name -> @@ -793,7 +857,16 @@ let write_must_wrapper funcs filename = ; "Split" ; "SplitWithSizes" ; "Unbind" - ; "Where" ] + ; "Where" + ; "Atleast1d1" + ; "Atleast2d1" + ; "Atleast3d1" + ; "Dequantize1" + ; "QuantizePerTensor1" + ; "UnsafeChunk" + ; "UnsafeSplit" + ; "UnsafeSplitWithSizes" + ; "AlignTensors" ] in if List.exists excluded_funcs ~f:(fun name -> @@ -943,7 +1016,7 @@ let run ~yaml_filename ~cpp_filename ~ffi_filename ~must_wrapper_filename write_wrapper funcs wrapper_filename let () = - run ~yaml_filename:"gen/pytorch/Declarations-v1.5.0.yaml" + run ~yaml_filename:"gen/pytorch/Declarations-v1.7.0.yaml" ~cpp_filename:"libtch/torch_api_generated" ~ffi_filename:"libtch/c-generated.go" ~must_wrapper_filename:"tensor/must-tensor-generated.go" diff --git a/gen/pytorch/Declarations-v1.7.0.yaml b/gen/pytorch/Declarations-v1.7.0.yaml new file mode 100644 index 0000000..2d56754 --- /dev/null +++ b/gen/pytorch/Declarations-v1.7.0.yaml @@ -0,0 +1,111616 @@ +- name: _cast_Byte + operator_name: _cast_Byte + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cast_Char + operator_name: _cast_Char + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cast_Double + operator_name: _cast_Double + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + 
dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cast_Float + operator_name: _cast_Float + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cast_Int + operator_name: _cast_Int + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cast_Long + operator_name: _cast_Long + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: 
Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cast_Short + operator_name: _cast_Short + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cast_Half + operator_name: _cast_Half + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: backward + operator_name: backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::backward(Tensor self, Tensor? gradient=None, bool? 
retain_graph=None, bool create_graph=False) -> () + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: gradient + type: const c10::optional& + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: retain_graph + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: create_graph + type: bool + schema_order_cpp_signature: void (const Tensor &, const c10::optional&, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: gradient + type: const c10::optional& + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: retain_graph + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: create_graph + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: set_data + operator_name: set_data + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::set_data(Tensor(a!) self, Tensor new_data) -> () + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: new_data + type: const Tensor & + schema_order_cpp_signature: void (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: new_data + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: data + operator_name: data + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::data(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_leaf + operator_name: is_leaf + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::is_leaf(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: output_nr + operator_name: output_nr + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::output_nr(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _version + operator_name: _version + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::_version(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + 
device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: requires_grad_ + operator_name: requires_grad_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: requires_grad + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, bool) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: requires_grad + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: retain_grad + operator_name: retain_grad + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: true + category_override: '' + matches_jit_signature: true + schema_string: aten::retain_grad(Tensor(a!) self) -> () + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: void (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rename_ + operator_name: rename_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rename_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: true + name: names + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: true + name: names + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rename + operator_name: rename + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rename(Tensor(a) self, Dimname[]? 
names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: true + name: names + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: true + name: names + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: align_to + operator_name: align_to + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::align_to(Tensor(a) self, Dimname[] names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: names + type: DimnameList + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: names + type: DimnameList + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: align_to + operator_name: align_to + overload_name: ellipsis_idx + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: order + type: DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ellipsis_idx + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: order + type: DimnameList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ellipsis_idx + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: align_as + operator_name: align_as + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::align_as(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: 
false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: align_tensors + operator_name: align_tensors + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::align_tensors(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: refine_names + operator_name: refine_names + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::refine_names(Tensor(a) self, Dimname[] names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: names + type: DimnameList + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: names + type: DimnameList + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _use_cudnn_ctc_loss + operator_name: _use_cudnn_ctc_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_use_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_lengths + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: target_lengths + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + 
schema_order_cpp_signature: bool (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_lengths + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: target_lengths + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_ctc_loss + operator_name: _cudnn_ctc_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cudnn_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank, bool deterministic, bool zero_infinity) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_lengths + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: target_lengths + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_lengths + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: target_lengths + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _use_cudnn_rnn_flatten_weight + operator_name: _use_cudnn_rnn_flatten_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_use_cudnn_rnn_flatten_weight() -> 
bool + arguments: [] + schema_order_cpp_signature: bool () + schema_order_arguments: [] + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_rnn_flatten_weight + operator_name: _cudnn_rnn_flatten_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cudnn_rnn_flatten_weight(Tensor[] weight_arr, int weight_stride0, int input_size, int mode, int hidden_size, int num_layers, bool batch_first, bool bidirectional) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight_arr + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: input_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + schema_order_cpp_signature: Tensor (TensorList, int64_t, int64_t, int64_t, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight_arr + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: input_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_rnn + operator_name: _cudnn_rnn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cudnn_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor? weight_buf, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? 
dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight_buf + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: dropout_state + type: const c10::optional& + schema_order_cpp_signature: std::tuple (const Tensor &, TensorList, int64_t, const c10::optional&, const Tensor &, const c10::optional&, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight_buf + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: dropout_state + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: 
result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + - dynamic_type: Tensor + name: result4 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_rnn_backward + operator_name: _cudnn_rnn_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cudnn_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight_buf + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_output + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: dropout_state + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: reserve + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple> (const Tensor &, TensorList, int64_t, const Tensor &, const Tensor &, const c10::optional&, const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const c10::optional&, const Tensor &, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor 
+ is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight_buf + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_output + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: dropout_state + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: reserve + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: TensorList + name: result3 + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cudnn_init_dropout_state + operator_name: _cudnn_init_dropout_state + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cudnn_init_dropout_state(float dropout, bool train, int dropout_seed, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dropout_seed + type: int64_t + - annotation: null + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (double, bool, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dropout_seed + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _debug_has_internal_overlap + operator_name: _debug_has_internal_overlap + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_debug_has_internal_overlap(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fused_dropout + operator_name: _fused_dropout + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_fused_dropout(Tensor self, float p, Generator? 
generator=None) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: std::tuple (const Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _masked_scale + operator_name: _masked_scale + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_masked_scale(Tensor self, Tensor mask, float scale) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sobol_engine_draw + operator_name: _sobol_engine_draw + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sobol_engine_draw(Tensor quasi, int n, Tensor sobolstate, int dimension, int num_generated, ScalarType? 
dtype) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: quasi + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sobolstate + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + - annotation: null + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, const Tensor &, int64_t, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: quasi + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sobolstate + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + - annotation: null + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sobol_engine_ff_ + operator_name: _sobol_engine_ff_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sobol_engine_ff_(Tensor(a!) self, int n, Tensor sobolstate, int dimension, int num_generated) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sobolstate + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sobolstate + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_generated + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sobol_engine_scramble_ + operator_name: _sobol_engine_scramble_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sobol_engine_scramble_(Tensor(a!) self, Tensor ltm, int dimension) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: ltm + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: ltm + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sobol_engine_initialize_state_ + operator_name: _sobol_engine_initialize_state_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sobol_engine_initialize_state_(Tensor(a!) self, int dimension) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _reshape_from_tensor + operator_name: _reshape_from_tensor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_reshape_from_tensor(Tensor self, Tensor shape) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: shape + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: shape + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _shape_as_tensor + operator_name: _shape_as_tensor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_shape_as_tensor(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dropout + operator_name: dropout + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + 
deprecated: false + has_math_kernel: false +- name: dropout_ + operator_name: dropout_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, double, bool) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: feature_dropout + operator_name: feature_dropout + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::feature_dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: feature_dropout_ + operator_name: feature_dropout_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, double, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: alpha_dropout + operator_name: alpha_dropout + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: alpha_dropout_ + operator_name: alpha_dropout_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, double, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: feature_alpha_dropout + operator_name: feature_alpha_dropout + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: feature_alpha_dropout_ + operator_name: feature_alpha_dropout_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, double, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: abs + operator_name: abs + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::abs(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: abs_ + operator_name: abs_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::abs_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: abs_out + operator_name: abs + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: absolute + operator_name: absolute + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::absolute(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: absolute_ + operator_name: absolute_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::absolute_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: absolute_out + operator_name: absolute + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::absolute.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: angle + operator_name: angle + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::angle(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: angle_out + operator_name: angle + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: view_as_real + operator_name: view_as_real + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::view_as_real(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: view_as_complex + operator_name: view_as_complex + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::view_as_complex(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sgn + operator_name: sgn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sgn(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sgn_ + operator_name: sgn_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sgn_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sgn_out + operator_name: sgn + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sgn.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: real + operator_name: real + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::real(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: imag + operator_name: imag + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::imag(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conj + operator_name: conj + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conj(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: 
false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conj_out + operator_name: conj + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conj.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _conj + operator_name: _conj + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_conj(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acos + operator_name: acos + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::acos(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acos_ + operator_name: acos_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::acos_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acos_out + operator_name: acos + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccos + operator_name: arccos + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arccos(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccos_ + operator_name: arccos_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arccos_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccos_out + operator_name: arccos + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool1d + operator_name: avg_pool1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool1d + operator_name: adaptive_avg_pool1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const 
Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool1d + operator_name: adaptive_max_pool1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add + operator_name: add + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add_ + operator_name: add_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add_out + operator_name: add + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu + operator_name: _add_relu + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu_ + operator_name: _add_relu_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_relu_out + operator_name: _add_relu + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add + operator_name: add + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: add_ + operator_name: add_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmv + operator_name: addmv + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmv_ + operator_name: addmv_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmv_out + operator_name: addmv + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _addmv_impl_ + operator_name: _addmv_impl_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_addmv_impl_(Tensor(a!) self, Tensor self2, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addr + operator_name: addr + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addr(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addr_ + operator_name: 
addr_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addr_(Tensor(a!) self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addr_out + operator_name: addr + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addr.out(Tensor self, Tensor vec1, Tensor vec2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: affine_grid_generator + operator_name: affine_grid_generator + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::affine_grid_generator(Tensor theta, int[] size, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: theta + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: theta + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: affine_grid_generator_backward + operator_name: affine_grid_generator_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::affine_grid_generator_backward(Tensor grad, int[] size, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all + operator_name: all + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false 
+ name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all_out + operator_name: all + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all + operator_name: all + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all_out + operator_name: all + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, Dimname, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: allclose + operator_name: allclose + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::allclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + schema_order_cpp_signature: bool (const Tensor &, const Tensor &, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any + operator_name: any + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any_out + operator_name: any + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any + operator_name: any + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::any.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any_out + operator_name: any + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::any.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, Dimname, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arange + operator_name: arange + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (Scalar, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arange + operator_name: arange + overload_name: start + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (Scalar, Scalar, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arange + operator_name: arange + overload_name: start_step + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arange.start_step(Scalar start, Scalar end, Scalar step, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: step + type: Scalar + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (Scalar, Scalar, Scalar, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: step + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arange_out + operator_name: arange + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + schema_order_cpp_signature: Tensor & (Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arange_out + operator_name: arange + overload_name: start_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: step + type: Scalar + schema_order_cpp_signature: Tensor & (Scalar, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: step + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _dim_arange + operator_name: _dim_arange + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_dim_arange(Tensor like, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: like + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: like + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: argmax + operator_name: argmax + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::argmax(Tensor self, int? 
dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: argmin + operator_name: argmin + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acosh + operator_name: acosh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::acosh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acosh_ + operator_name: acosh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::acosh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: acosh_out + operator_name: acosh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccosh + operator_name: arccosh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arccosh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccosh_ + operator_name: arccosh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arccosh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arccosh_out + operator_name: arccosh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: asinh + operator_name: asinh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::asinh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: asinh_ + operator_name: asinh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::asinh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: asinh_out + operator_name: asinh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsinh + operator_name: arcsinh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arcsinh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsinh_ + operator_name: arcsinh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arcsinh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsinh_out + operator_name: arcsinh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arcsinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atanh + operator_name: atanh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atanh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atanh_ + operator_name: atanh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atanh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atanh_out + operator_name: atanh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctanh + operator_name: arctanh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arctanh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctanh_ + operator_name: arctanh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arctanh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctanh_out + operator_name: arctanh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arctanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: as_strided + operator_name: as_strided + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::as_strided(Tensor(a) self, int[] size, int[] stride, int? 
storage_offset=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: storage_offset + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: storage_offset + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: as_strided_ + operator_name: as_strided_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::as_strided_(Tensor(a!) self, int[] size, int[] stride, int? storage_offset=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: storage_offset + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, IntArrayRef, IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: storage_offset + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: asin + operator_name: asin + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::asin(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: asin_ + operator_name: asin_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::asin_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: asin_out + operator_name: asin + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsin + operator_name: arcsin + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arcsin(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsin_ + operator_name: arcsin_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arcsin_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arcsin_out + operator_name: arcsin + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arcsin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan + operator_name: atan + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atan(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan_ + operator_name: atan_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atan_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan_out + operator_name: atan + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctan + operator_name: arctan + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arctan(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctan_ + operator_name: arctan_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arctan_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: arctan_out + operator_name: arctan + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atleast_1d + operator_name: atleast_1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atleast_1d(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atleast_1d + operator_name: atleast_1d + overload_name: Sequence + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atleast_1d.Sequence(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atleast_2d + operator_name: atleast_2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atleast_2d(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atleast_2d + operator_name: atleast_2d + overload_name: Sequence + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atleast_2d.Sequence(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: 
result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atleast_3d + operator_name: atleast_3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atleast_3d(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atleast_3d + operator_name: atleast_3d + overload_name: Sequence + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atleast_3d.Sequence(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: baddbmm + operator_name: baddbmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + 
inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: baddbmm_ + operator_name: baddbmm_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _baddbmm_mkl_ + operator_name: _baddbmm_mkl_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_baddbmm_mkl_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: baddbmm_out + operator_name: baddbmm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bartlett_window + operator_name: bartlett_window + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bartlett_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bartlett_window + operator_name: bartlett_window + overload_name: periodic + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bartlett_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm + operator_name: batch_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, bool, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_batch_norm + operator_name: quantized_batch_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantized_batch_norm(Tensor input, Tensor? weight, Tensor? 
bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: var + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: output_scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: output_zero_point + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const c10::optional&, const c10::optional&, const Tensor &, const Tensor &, double, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: var + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: output_scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: output_zero_point + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _batch_norm_impl_index + operator_name: _batch_norm_impl_index + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_batch_norm_impl_index(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? 
running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> (Tensor, Tensor, Tensor, Tensor, int) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, bool, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + - dynamic_type: int64_t + name: result4 + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _batch_norm_impl_index_backward + operator_name: _batch_norm_impl_index_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_batch_norm_impl_index_backward(int impl_index, Tensor input, Tensor grad_output, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_var_transform, bool train, float eps, bool[3] output_mask, Tensor reservedSpace) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: impl_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_var_transform + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: reservedSpace + type: const Tensor & + schema_order_cpp_signature: std::tuple (int64_t, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, bool, double, std::array, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: impl_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_var_transform + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: reservedSpace + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli + operator_name: bernoulli + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + 
schema_string: aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli_out + operator_name: bernoulli + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli_ + operator_name: bernoulli_ + overload_name: Tensor + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: p + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: p + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli_ + operator_name: bernoulli_ + overload_name: float + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0.5 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0.5 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bernoulli + operator_name: bernoulli + overload_name: p + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bernoulli.p(Tensor self, float p, *, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bilinear + operator_name: bilinear + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy + operator_name: binary_cross_entropy + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_out + operator_name: binary_cross_entropy + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_backward + operator_name: binary_cross_entropy_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_backward_out + operator_name: binary_cross_entropy_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_with_logits + operator_name: binary_cross_entropy_with_logits + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? 
pos_weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: pos_weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: pos_weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binary_cross_entropy_with_logits_backward + operator_name: binary_cross_entropy_with_logits_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::binary_cross_entropy_with_logits_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, Tensor? 
pos_weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: pos_weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: pos_weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bincount + operator_name: bincount + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bincount(Tensor self, Tensor? 
weights=None, int minlength=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weights + type: const c10::optional& + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: minlength + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const c10::optional&, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weights + type: const c10::optional& + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: minlength + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_not + operator_name: bitwise_not + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_not(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_not_ + operator_name: bitwise_not_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_not_out + operator_name: bitwise_not + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_not + operator_name: logical_not + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_not(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_not_ + operator_name: logical_not_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_not_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_not_out + operator_name: logical_not + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor + operator_name: logical_xor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_xor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_ + operator_name: logical_xor_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_xor_out + operator_name: logical_xor + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_and + operator_name: logical_and + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_and(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_and_ + operator_name: logical_and_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_and_out + operator_name: logical_and + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_or + operator_name: logical_or + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_or(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_or_ + operator_name: logical_or_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logical_or_out + operator_name: logical_or + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: blackman_window + operator_name: blackman_window + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::blackman_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: blackman_window + operator_name: blackman_window + overload_name: periodic + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::blackman_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bmm + operator_name: bmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bmm(Tensor self, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _bmm + operator_name: _bmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_bmm(Tensor self, Tensor mat2, *, bool deterministic=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - 
annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bmm_out + operator_name: bmm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _bmm_out + operator_name: _bmm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_bmm.out(Tensor self, Tensor mat2, *, bool deterministic=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: deterministic + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: deterministic + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: broadcast_tensors + operator_name: broadcast_tensors + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::broadcast_tensors(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: cat + operator_name: cat + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cat(Tensor[] tensors, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (TensorList, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cat_out + operator_name: cat + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (TensorList, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cat + operator_name: cat + overload_name: names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: Tensor (TensorList, Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cat_out + operator_name: cat + overload_name: names_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: Tensor & (TensorList, Dimname, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: block_diag + operator_name: block_diag + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::block_diag(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ceil + operator_name: ceil + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ceil(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ceil_ + operator_name: ceil_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ceil_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ceil_out + operator_name: ceil + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: chain_matmul + operator_name: chain_matmul + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::chain_matmul(Tensor[] matrices) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: matrices + type: TensorList + schema_order_cpp_signature: Tensor (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: matrices + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsafe_chunk + operator_name: unsafe_chunk + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[] + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: chunks + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::vector (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: chunks + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: chunk + operator_name: chunk + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::chunk(Tensor(a) self, int chunks, int dim=0) -> Tensor(a)[] + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: chunks + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::vector (const Tensor 
&, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: chunks + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp + operator_name: clamp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_ + operator_name: clamp_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, c10::optional, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_out + operator_name: clamp + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_max + operator_name: clamp_max + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp_max(Tensor self, Scalar max) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_max_ + operator_name: clamp_max_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp_max_(Tensor(a!) 
self, Scalar max) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_max_out + operator_name: clamp_max + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min + operator_name: clamp_min + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp_min(Tensor self, Scalar min) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min_ + operator_name: clamp_min_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clamp_min_out + operator_name: clamp_min + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clip + operator_name: clip + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clip(Tensor self, Scalar? min=None, Scalar? 
max=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clip_ + operator_name: clip_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, c10::optional, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clip_out + operator_name: clip + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: min + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: max + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cudnn_is_acceptable + operator_name: cudnn_is_acceptable + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cudnn_is_acceptable(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: complex + operator_name: complex + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::complex(Tensor real, Tensor imag) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: real + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: imag + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: real + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: imag + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: complex_out + operator_name: complex + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: real + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: imag + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: real + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: imag + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polar + operator_name: polar + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::polar(Tensor abs, Tensor angle) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: abs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: angle + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: abs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: angle + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polar_out + operator_name: polar + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: abs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: angle + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: abs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: angle + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: constant_pad_nd + operator_name: constant_pad_nd + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::constant_pad_nd(Tensor self, int[] pad, Scalar value=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: pad + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: pad + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: contiguous + operator_name: contiguous + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: MemoryFormat + is_nullable: false + kwarg_only: true + name: memory_format + type: MemoryFormat + schema_order_cpp_signature: Tensor (const Tensor &, MemoryFormat) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: MemoryFormat + is_nullable: false + kwarg_only: true + name: memory_format + type: MemoryFormat + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: convolution + operator_name: convolution + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: convolution_overrideable + operator_name: convolution_overrideable + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: convolution_backward_overrideable + operator_name: convolution_backward_overrideable + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: 
IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: grad_input + name: grad_input + type: Tensor + - dynamic_type: Tensor + field_name: grad_weight + name: grad_weight + type: Tensor + - dynamic_type: Tensor + field_name: grad_bias + name: grad_bias + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convolution + operator_name: _convolution + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_convolution(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t, bool, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convolution + operator_name: _convolution + overload_name: deprecated + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convolution_nogroup + operator_name: _convolution_nogroup + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_convolution_nogroup(Tensor input, Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _convolution_double_backward + operator_name: _convolution_double_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_convolution_double_backward(Tensor? ggI, Tensor? ggW, Tensor? 
ggb, Tensor gO, Tensor weight, Tensor self, int[] stride, int[] padding, int[] dilation, bool transposed, int[] output_padding, int groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: ggI + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: ggW + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: ggb + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: gO + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const c10::optional&, const c10::optional&, const c10::optional&, const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, IntArrayRef, int64_t, bool, bool, bool, bool, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: ggI + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: ggW + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: ggb + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: gO + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: transposed + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: 
null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: allow_tf32 + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv1d + operator_name: conv1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, int[1] stride=1, int[1] padding=0, int[1] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv2d + operator_name: conv2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv2d(Tensor input, Tensor weight, 
Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv3d + operator_name: conv3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv3d(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv_tbc + operator_name: conv_tbc + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv_tbc(Tensor self, Tensor weight, Tensor bias, int pad=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: pad + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: pad + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result 
+ type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv_tbc_backward + operator_name: conv_tbc_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv_tbc_backward(Tensor self, Tensor input, Tensor weight, Tensor bias, int pad) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: pad + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: pad + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv_transpose1d + operator_name: conv_transpose1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? 
bias=None, int[1] stride=1, int[1] padding=0, int[1] output_padding=0, int groups=1, int[1] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv_transpose2d + operator_name: conv_transpose2d + overload_name: input + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: conv_transpose3d + operator_name: conv_transpose3d + overload_name: input + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int groups=1, int[3] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copy_ + operator_name: copy_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _copy_from + operator_name: _copy_from + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_copy_from(Tensor self, Tensor dst, bool non_blocking=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: dst + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: dst + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cos + operator_name: cos + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cos(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cos_ + operator_name: cos_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cos_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
[Verbatim auto-generated PyTorch v1.7.0 ATen declaration records (YAML), added line-by-line by this hunk. Each record carries the operator's name, overload_name, schema_string, arguments, schema_order_cpp_signature, schema_order_arguments, method_of, returns, and flags such as inplace, abstract, and device_guard. The span begins with the tail of the preceding in-place record and then adds the records for: cos.out; cosh; cosh_; cosh.out; cosine_embedding_loss; count_nonzero (dim_IntList and optional-dim overloads); cudnn_affine_grid_generator and cudnn_affine_grid_generator_backward; cudnn_batch_norm and cudnn_batch_norm_backward; cudnn_convolution (deprecated, deprecated2, and current overloads); cudnn_convolution_backward_input, cudnn_convolution_backward, and cudnn_convolution_backward_weight; cudnn_convolution_transpose (deprecated, deprecated2, and current overloads); cudnn_convolution_transpose_backward, cudnn_convolution_transpose_backward_input, and cudnn_convolution_transpose_backward_weight; cudnn_grid_sampler and cudnn_grid_sampler_backward; cummax (plain, out, dimname, and dimname_out overloads) and _cummax_helper; cummin (plain, out, dimname, and dimname_out overloads) and _cummin_helper; cummaxmin_backward; and the start of cumprod, whose record continues below.]
use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumprod(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumprod_out + operator_name: cumprod + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumprod.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumprod + operator_name: cumprod + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumprod.dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumprod_out + operator_name: cumprod + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumprod.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, Dimname, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumprod_backward + operator_name: cumprod_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumprod_backward(Tensor grad, Tensor input, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumsum + operator_name: cumsum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumsum_out + operator_name: cumsum + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumsum + operator_name: cumsum + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cumsum_out + operator_name: cumsum + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, Dimname, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ctc_loss + operator_name: ctc_loss + overload_name: IntList + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_lengths + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: target_lengths + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_lengths + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: target_lengths + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: 
false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ctc_loss + operator_name: ctc_loss + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_lengths + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target_lengths + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_lengths + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target_lengths + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _ctc_loss + operator_name: _ctc_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: log_probs + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: targets + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_lengths + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: target_lengths + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: blank + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: zero_infinity + type: bool + 
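The cumulative-op and CTC-loss declarations above are the schema metadata that the binding generator consumes when emitting Go wrappers. As a rough illustration of how a couple of these schemas could surface on the Go side, here is a minimal sketch; the wrapper names and signatures (`MustOfSlice`, `MustCumsum`, `MustCummax`, the trailing `del` flag) are assumptions modeled on gotch's `Must*` convention, not the verified generated API.

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Hypothetical wrapper names following gotch's Must* convention;
	// the exact generated signatures may differ.
	xs := ts.MustOfSlice([]float64{1, 2, 3, 4})

	// aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor
	cs := xs.MustCumsum(0, gotch.Double, false)
	fmt.Println(cs.Float64Values()) // [1 3 6 10]

	// aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)
	values, indices := xs.MustCummax(0, false)
	fmt.Println(values.Float64Values(), indices.Int64Values())

	// gotch frees C tensors explicitly.
	cs.MustDrop()
	values.MustDrop()
	indices.MustDrop()
	xs.MustDrop()
}
```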
+- name: _ctc_loss_backward
+  schema_string: aten::_ctc_loss_backward(Tensor grad, Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, Tensor neg_log_likelihood, Tensor log_alpha, int blank, bool zero_infinity=False) -> Tensor
+- name: diag_embed
+  schema_string: aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor
+- name: diagflat
+  schema_string: aten::diagflat(Tensor self, int offset=0) -> Tensor
+- name: diagonal
+  schema_string: aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
+- name: diagonal
+  schema_string: aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
+- name: diagonal_backward
+  schema_string: aten::diagonal_backward(Tensor grad, int[] input_sizes, int offset, int dim1, int dim2) -> Tensor
+- name: fill_diagonal_
+  schema_string: aten::fill_diagonal_(Tensor(a!) self, Scalar fill_value, bool wrap=False) -> Tensor(a!)
+- name: div
+  schema_string: aten::div.Tensor(Tensor self, Tensor other) -> Tensor
+- name: div_
+  schema_string: aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+- name: div_out
+  schema_string: aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+- name: div
+  schema_string: aten::div.Scalar(Tensor self, Scalar other) -> Tensor
+- name: div_
+  schema_string: aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+- name: divide
+  schema_string: aten::divide.Tensor(Tensor self, Tensor other) -> Tensor
+- name: divide_
+  schema_string: aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+- name: divide_out
+  schema_string: aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+- name: divide
+  schema_string: aten::divide.Scalar(Tensor self, Scalar other) -> Tensor
+- name: divide_
+  schema_string: aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+- name: true_divide
+  schema_string: aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor
+- name: true_divide_
+  schema_string: aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)
+- name: true_divide_out
+  schema_string: aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
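The `div`/`divide`/`true_divide` declarations come in Tensor and Scalar overloads of the same operator. A minimal sketch of calling the two overloads from Go follows; `MustDiv`, `MustDiv1`, and `ts.FloatScalar` are assumed names (gotch typically distinguishes overloads with a numeric suffix) and may not match the generated code exactly.

```go
package main

import (
	"fmt"

	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Assumed names: MustDiv (Tensor overload), MustDiv1 (Scalar overload),
	// ts.FloatScalar; they follow gotch conventions but are unverified.
	a := ts.MustOfSlice([]float64{2, 4, 6})
	b := ts.MustOfSlice([]float64{2, 2, 2})

	// aten::div.Tensor(Tensor self, Tensor other) -> Tensor
	q1 := a.MustDiv(b, false)
	fmt.Println(q1.Float64Values()) // [1 2 3]

	// aten::div.Scalar(Tensor self, Scalar other) -> Tensor
	q2 := a.MustDiv1(ts.FloatScalar(2.0), false)
	fmt.Println(q2.Float64Values()) // [1 2 3]

	q1.MustDrop()
	q2.MustDrop()
	b.MustDrop()
	a.MustDrop()
}
```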
+- name: true_divide
+  schema_string: aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor
+- name: true_divide_
+  schema_string: aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)
+- name: dot
+  schema_string: aten::dot(Tensor self, Tensor tensor) -> Tensor
+- name: dot_out
+  schema_string: aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)
+- name: vdot
+  schema_string: aten::vdot(Tensor self, Tensor other) -> Tensor
+- name: vdot_out
+  schema_string: aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: einsum + operator_name: einsum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::einsum(str equation, Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: std::string + is_nullable: false + name: equation + type: std::string + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor (std::string, TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: std::string + is_nullable: false + name: equation + type: std::string + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding + operator_name: embedding + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::embedding(Tensor weight, Tensor indices, int padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_backward + operator_name: embedding_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::embedding_backward(Tensor grad, Tensor indices, int 
num_weights, int padding_idx, bool scale_grad_by_freq, bool sparse) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_dense_backward + operator_name: embedding_dense_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::embedding_dense_backward(Tensor grad_output, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_renorm_ + 
operator_name: embedding_renorm_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: max_norm + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: norm_type + type: double + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, double, double) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: max_norm + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: norm_type + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_sparse_backward + operator_name: embedding_sparse_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::embedding_sparse_backward(Tensor grad, Tensor indices, int num_weights, int padding_idx, bool scale_grad_by_freq) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_idx + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag_forward_only + operator_name: _embedding_bag_forward_only + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::_embedding_bag_forward_only(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, bool, int64_t, bool, const c10::optional&, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: embedding_bag + operator_name: embedding_bag + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, bool, int64_t, bool, const c10::optional&, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag + operator_name: _embedding_bag + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? 
per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, bool, int64_t, bool, const c10::optional&, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: include_last_offset + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag_backward + operator_name: _embedding_bag_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_embedding_bag_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, bool sparse, Tensor? 
per_sample_weights) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offset2bag + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bag_size + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: maximum_indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, bool, int64_t, bool, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offset2bag + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bag_size + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: maximum_indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag_sparse_backward + operator_name: _embedding_bag_sparse_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, int num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offset2bag + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bag_size + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, bool, int64_t, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offset2bag + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bag_size + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag_dense_backward + operator_name: _embedding_bag_dense_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_embedding_bag_dense_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, Tensor maximum_indices, int num_weights, bool scale_grad_by_freq, int mode, Tensor? 
per_sample_weights) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offset2bag + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bag_size + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: maximum_indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, bool, int64_t, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offset2bag + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bag_size + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: maximum_indices + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_weights + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: scale_grad_by_freq + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: per_sample_weights + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _embedding_bag_per_sample_weights_backward + operator_name: _embedding_bag_per_sample_weights_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_embedding_bag_per_sample_weights_backward(Tensor grad, Tensor weight, Tensor indices, Tensor offsets, Tensor offset2bag, int mode) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: 
offset2bag + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offsets + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: offset2bag + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_meta + operator_name: empty_meta + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::empty_meta(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty + operator_name: empty + overload_name: names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::empty.names(int[] size, *, Dimname[]? 
names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty + operator_name: empty + overload_name: memory_format + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::empty.memory_format(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_empty + operator_name: new_empty + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::new_empty(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_full + operator_name: new_full + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::new_full(Tensor self, int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, Scalar, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: new_zeros + operator_name: new_zeros + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::new_zeros(Tensor self, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _empty_affine_quantized + operator_name: _empty_affine_quantized + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_empty_affine_quantized(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, float scale=1, int zero_point=0, MemoryFormat? 
memory_format=contiguous_format) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + kwarg_only: true + name: scale + type: double + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: zero_point + type: int64_t + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, double, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + kwarg_only: true + name: scale + type: double + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: zero_point + type: int64_t + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _empty_per_channel_affine_quantized + operator_name: _empty_per_channel_affine_quantized + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: factory + matches_jit_signature: true + schema_string: aten::_empty_per_channel_affine_quantized(int[] size, *, Tensor scales, Tensor zero_points, int axis, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=contiguous_format) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + kwarg_only: true + name: scales + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + kwarg_only: true + name: zero_points + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &, const Tensor &, int64_t, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + kwarg_only: true + name: scales + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + kwarg_only: true + name: zero_points + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + kwarg_only: true + name: axis + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: MemoryFormat::Contiguous + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: resize_ + operator_name: resize_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::resize_(Tensor(a!) self, int[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_quantized + operator_name: empty_quantized + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::empty_quantized(int[] size, Tensor qtensor) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: qtensor + type: const Tensor & + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: qtensor + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_out + operator_name: empty + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::empty.out(int[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor & (IntArrayRef, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_like + operator_name: empty_like + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: empty_strided + operator_name: empty_strided + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::empty_strided(int[] size, int[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erf + operator_name: erf + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erf_ + operator_name: erf_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erf_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erf_out + operator_name: erf + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfc + operator_name: erfc + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erfc(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfc_ + operator_name: erfc_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erfc_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfc_out + operator_name: erfc + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp + operator_name: exp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::exp(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp_ + operator_name: exp_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::exp_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp_out + operator_name: exp + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp2 + operator_name: exp2 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::exp2(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp2_ + operator_name: exp2_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::exp2_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exp2_out + operator_name: exp2 + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::exp2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expm1 + operator_name: expm1 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::expm1(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expm1_ + operator_name: expm1_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::expm1_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expm1_out + operator_name: expm1 + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: expand + operator_name: expand + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::expand(Tensor(a) self, int[] size, *, bool implicit=False) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: implicit + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: implicit + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: expand_as + operator_name: expand_as + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: eye + operator_name: eye + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eye(int n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eye + operator_name: eye + overload_name: m + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eye.m(int n, int m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eye_out + operator_name: eye + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eye.out(int n, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: Tensor & (int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eye_out + operator_name: eye + overload_name: m_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eye.m_out(int n, int m, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + schema_order_cpp_signature: Tensor & (int64_t, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: m + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: flatten + operator_name: flatten + overload_name: using_ints + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: flatten + operator_name: flatten + overload_name: named_out_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: out_dim + type: Dimname + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t, Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end_dim + type: int64_t + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: out_dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: flatten + operator_name: flatten + overload_name: using_names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: start_dim + type: Dimname + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: end_dim + type: Dimname + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: out_dim + type: Dimname + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, Dimname, Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: start_dim + type: Dimname + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: end_dim + type: Dimname + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: out_dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: flatten + operator_name: flatten + overload_name: DimnameList + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dims + type: DimnameList + - annotation: null + dynamic_type: Dimname + is_nullable: 
false + name: out_dim + type: Dimname + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList, Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dims + type: DimnameList + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: out_dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unflatten + operator_name: unflatten + overload_name: int + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unflatten.int(Tensor(a) self, int dim, int[] sizes, Dimname[]? names=None) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: DimnameList + is_nullable: true + name: names + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, IntArrayRef, c10::optional) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: DimnameList + is_nullable: true + name: names + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unflatten + operator_name: unflatten + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unflatten.Dimname(Tensor(a) self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: names + type: DimnameList + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, IntArrayRef, DimnameList) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: 
names + type: DimnameList + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fill_ + operator_name: fill_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fill_ + operator_name: fill_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor + operator_name: floor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_ + operator_name: floor_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_out + operator_name: floor + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide + operator_name: floor_divide + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor_divide(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide_ + operator_name: floor_divide_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide_out + operator_name: floor_divide + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide + operator_name: floor_divide + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: floor_divide_ + operator_name: floor_divide_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frac + operator_name: frac + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::frac(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frac_ + operator_name: frac_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::frac_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frac_out + operator_name: frac + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::frac.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: full + operator_name: full + overload_name: names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, Scalar, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: full + operator_name: full + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::full(int[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, Scalar, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: full_out + operator_name: full + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::full.out(int[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + schema_order_cpp_signature: Tensor & (IntArrayRef, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: full_like + operator_name: full_like + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: fill_value + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: from_file + operator_name: from_file + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: std::string + is_nullable: false + name: filename + type: std::string + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: shared + type: c10::optional + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: true + name: size + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (std::string, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: std::string + is_nullable: false + name: filename + type: std::string + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: shared + type: c10::optional + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: true + name: size + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gcd_out + operator_name: gcd + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gcd + operator_name: gcd + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gcd(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gcd_ + operator_name: gcd_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lcm_out + operator_name: lcm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lcm + operator_name: lcm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lcm(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lcm_ + operator_name: lcm_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lcm_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler + operator_name: grid_sampler + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler_2d + operator_name: grid_sampler_2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::grid_sampler_2d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + 
dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler_2d_backward + operator_name: grid_sampler_2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::grid_sampler_2d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _grid_sampler_2d_cpu_fallback + operator_name: _grid_sampler_2d_cpu_fallback + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_grid_sampler_2d_cpu_fallback(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, 
int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _grid_sampler_2d_cpu_fallback_backward + operator_name: _grid_sampler_2d_cpu_fallback_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler_3d + operator_name: grid_sampler_3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::grid_sampler_3d(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const 
Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: grid_sampler_3d_backward + operator_name: grid_sampler_3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::grid_sampler_3d_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grid + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: interpolation_mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: padding_mode + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hann_window + operator_name: hann_window + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + 
schema_string: aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hann_window + operator_name: hann_window + overload_name: periodic + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hamming_window + operator_name: hamming_window + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hamming_window + operator_name: hamming_window + overload_name: periodic + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hamming_window + operator_name: hamming_window + overload_name: periodic_alpha + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + 
schema_string: aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hamming_window + operator_name: hamming_window + overload_name: periodic_alpha_beta + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, double, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: alpha + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kaiser_window + operator_name: kaiser_window + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kaiser_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kaiser_window + operator_name: kaiser_window + overload_name: periodic + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kaiser_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kaiser_window + operator_name: kaiser_window + overload_name: beta + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::kaiser_window.beta(int window_length, bool periodic, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, bool, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: window_length + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: periodic + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hinge_embedding_loss + operator_name: hinge_embedding_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hinge_embedding_loss(Tensor self, Tensor target, float margin=1.0, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false 
+- name: group_norm + operator_name: group_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_groups + type: int64_t + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const c10::optional&, const c10::optional&, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_groups + type: int64_t + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_group_norm + operator_name: native_group_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_group_norm(Tensor input, Tensor? weight, Tensor? 
bias, int N, int C, int HxW, int group, float eps) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: std::tuple (const Tensor &, const c10::optional&, const c10::optional&, int64_t, int64_t, int64_t, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: true +- name: native_group_norm_backward + operator_name: native_group_norm_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_group_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? 
weight, int N, int C, int HxW, int group, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: rstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t, int64_t, int64_t, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: rstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: C + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: HxW + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: group + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ifft + operator_name: ifft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ifft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + 
name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rfft + operator_name: rfft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: irfft + operator_name: irfft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::irfft(Tensor self, int signal_ndim, bool normalized=False, bool onesided=True, int[] signal_sizes=[]) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: signal_sizes + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool, bool, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: signal_sizes + type: IntArrayRef + method_of: + - Type + - Tensor + 
- namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_with_size + operator_name: _fft_with_size + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_output + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: inverse + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: checked_signal_sizes + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_sizes + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool, bool, bool, IntArrayRef, bool, bool, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_output + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: inverse + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: checked_signal_sizes + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_sizes + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fft_with_size + operator_name: _fft_with_size + overload_name: norm_modes + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_fft_with_size.norm_modes(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, int normalization, bool onesided, int[] output_sizes) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + 
type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_output + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: inverse + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: checked_signal_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_sizes + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool, bool, bool, IntArrayRef, int64_t, bool, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: complex_output + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: inverse + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: checked_signal_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: normalization + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: onesided + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_sizes + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cufft_get_plan_cache_size + operator_name: _cufft_get_plan_cache_size + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cufft_get_plan_cache_size(int device_index) -> int + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + schema_order_cpp_signature: int64_t (int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cufft_get_plan_cache_max_size + operator_name: _cufft_get_plan_cache_max_size + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cufft_get_plan_cache_max_size(int device_index) -> int + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + schema_order_cpp_signature: int64_t (int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + 
method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cufft_set_plan_cache_max_size + operator_name: _cufft_set_plan_cache_max_size + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cufft_set_plan_cache_max_size(int device_index, int max_size) -> () + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_size + type: int64_t + schema_order_cpp_signature: void (int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: max_size + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cufft_clear_plan_cache + operator_name: _cufft_clear_plan_cache + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cufft_clear_plan_cache(int device_index) -> () + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + schema_order_cpp_signature: void (int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: device_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index + operator_name: index + overload_name: Tensor + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + schema_order_cpp_signature: Tensor (const Tensor &, TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_copy_ + operator_name: index_copy_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_copy + operator_name: index_copy + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_copy_ + operator_name: index_copy_ + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, Dimname, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_copy + operator_name: index_copy + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_put_ + operator_name: index_put_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, TensorList, const Tensor &, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_put + operator_name: index_put + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, TensorList, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _index_put_impl_ + operator_name: _index_put_impl_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: unsafe + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, TensorList, const Tensor &, bool, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: true + name: indices + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: unsafe + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: instance_norm + operator_name: instance_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: use_input_stats + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, bool, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: use_input_stats + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: cudnn_enabled + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: inverse + 
operator_name: inverse + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::inverse(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: inverse_out + operator_name: inverse + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _inverse_helper + operator_name: _inverse_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_inverse_helper(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isclose + operator_name: isclose + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isclose(Tensor self, Tensor other, float rtol=1e-05, float atol=1e-08, bool equal_nan=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + 
dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: rtol + type: double + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: atol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: equal_nan + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isnan + operator_name: isnan + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isnan(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_distributed + operator_name: is_distributed + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_distributed(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_floating_point + operator_name: is_floating_point + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_floating_point(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_complex + operator_name: 
is_complex + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_complex(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: isreal + operator_name: isreal + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isreal(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_nonzero + operator_name: is_nonzero + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_nonzero(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_same_size + operator_name: is_same_size + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_same_size(Tensor self, Tensor other) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_signed + operator_name: is_signed + 
overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_signed(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: kl_div + operator_name: kl_div + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kl_div_backward + operator_name: kl_div_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kl_div_backward(Tensor grad_output, Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const 
Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: log_target + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kthvalue + operator_name: kthvalue + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kthvalue_out + operator_name: kthvalue + overload_name: values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, int64_t, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kthvalue + operator_name: kthvalue + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: kthvalue_out + operator_name: kthvalue + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, Dimname, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: layer_norm + operator_name: layer_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight=None, Tensor? 
bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: normalized_shape + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: cudnn_enable + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, const c10::optional&, const c10::optional&, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: normalized_shape + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1.0e-05 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: cudnn_enable + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_layer_norm + operator_name: native_layer_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_layer_norm(Tensor input, Tensor? weight, Tensor? 
bias, int M, int N, float eps) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: M + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: std::tuple (const Tensor &, const c10::optional&, const c10::optional&, int64_t, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: M + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_layer_norm_backward + operator_name: native_layer_norm_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_layer_norm_backward(Tensor grad_out, Tensor input, Tensor mean, Tensor rstd, Tensor? 
weight, int M, int N, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: rstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: M + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: rstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: M + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linear + operator_name: linear + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linear(Tensor input, Tensor weight, Tensor? 
bias=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_linear + operator_name: mkldnn_linear + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_linear_int8_weight_fp32_activation + operator_name: fbgemm_linear_int8_weight_fp32_activation + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fbgemm_linear_int8_weight_fp32_activation(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_zero_point + type: Scalar + - annotation: null + 
dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_zero_point + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_linear_int8_weight + operator_name: fbgemm_linear_int8_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fbgemm_linear_int8_weight(Tensor input, Tensor weight, Tensor packed, Tensor col_offsets, Scalar weight_scale, Scalar weight_zero_point, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_zero_point + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight_zero_point + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- 
name: fbgemm_linear_quantize_weight + operator_name: fbgemm_linear_quantize_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fbgemm_linear_quantize_weight(Tensor input) -> (Tensor, Tensor, float, int) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: double + name: result2 + type: double + - dynamic_type: int64_t + name: result3 + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_pack_gemm_matrix_fp16 + operator_name: fbgemm_pack_gemm_matrix_fp16 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fbgemm_pack_gemm_matrix_fp16(Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_linear_fp16_weight_fp32_activation + operator_name: fbgemm_linear_fp16_weight_fp32_activation + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fbgemm_linear_fp16_weight_fp32_activation(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_linear_fp16_weight + operator_name: fbgemm_linear_fp16_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + 
matches_jit_signature: true + schema_string: aten::fbgemm_linear_fp16_weight(Tensor input, Tensor packed_weight, Tensor bias) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: bias + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_pack_quantized_matrix + operator_name: fbgemm_pack_quantized_matrix + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fbgemm_pack_quantized_matrix(Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fbgemm_pack_quantized_matrix + operator_name: fbgemm_pack_quantized_matrix + overload_name: KN + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fbgemm_pack_quantized_matrix.KN(Tensor input, int K, int N) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: K + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: K + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: N + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace + operator_name: linspace + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linspace(Scalar start, 
Scalar end, int? steps=None, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (Scalar, Scalar, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linspace_out + operator_name: linspace + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linspace.out(Scalar start, Scalar end, int? steps=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + schema_order_cpp_signature: Tensor & (Scalar, Scalar, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log + operator_name: log + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_ + operator_name: log_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_out + operator_name: log + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log10 + operator_name: log10 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log10(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log10_ + operator_name: log10_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log10_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log10_out + operator_name: log10 + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log1p + operator_name: log1p + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log1p(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log1p_ + operator_name: log1p_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log1p_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log1p_out + operator_name: log1p + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log2 + operator_name: log2 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log2(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log2_ + operator_name: log2_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log2_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log2_out + operator_name: log2 + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp_out + operator_name: logaddexp + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp + operator_name: logaddexp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logaddexp(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp2_out + operator_name: logaddexp2 + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logaddexp2 + operator_name: logaddexp2 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logaddexp2(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logdet + operator_name: logdet + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logdet(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace + operator_name: logspace + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logspace(Scalar start, Scalar end, int? steps=None, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (Scalar, Scalar, c10::optional, double, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logspace_out + operator_name: logspace + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logspace.out(Scalar start, Scalar end, int? steps=None, float base=10.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + schema_order_cpp_signature: Tensor & (Scalar, Scalar, c10::optional, double, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: start + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: end + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: steps + type: c10::optional + - annotation: null + default: 10.0 + dynamic_type: double + is_nullable: false + name: base + type: double + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_softmax + operator_name: log_softmax + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_softmax + operator_name: log_softmax + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _log_softmax + operator_name: _log_softmax + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _log_softmax_backward_data + operator_name: _log_softmax_backward_data + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - 
annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _logcumsumexp + operator_name: _logcumsumexp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_logcumsumexp(Tensor self, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _logcumsumexp_out + operator_name: _logcumsumexp + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logcumsumexp + operator_name: logcumsumexp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logcumsumexp(Tensor self, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logcumsumexp_out + operator_name: logcumsumexp + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logcumsumexp + operator_name: logcumsumexp + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: Tensor (const Tensor &, Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logcumsumexp_out + operator_name: logcumsumexp + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: Tensor & (const Tensor &, Dimname, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logsumexp + operator_name: logsumexp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logsumexp_out + operator_name: logsumexp + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logsumexp + operator_name: logsumexp + overload_name: names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logsumexp_out + operator_name: logsumexp + overload_name: names_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, DimnameList, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: margin_ranking_loss + operator_name: margin_ranking_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, float margin=0.0, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 0.0 + dynamic_type: double + is_nullable: false + name: margin + type: double + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: matmul + operator_name: matmul + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::matmul(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: matmul_out + operator_name: matmul + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::matmul.out(Tensor self, Tensor 
other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: matrix_rank + operator_name: matrix_rank + overload_name: tol + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::matrix_rank.tol(Tensor self, float tol, bool symmetric=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: tol + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: matrix_rank + operator_name: matrix_rank + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::matrix_rank(Tensor self, bool symmetric=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: symmetric + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: matrix_power + operator_name: matrix_power + overload_name: '' + use_c10_dispatcher: full + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::matrix_power(Tensor self, int n) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: matrix_exp + operator_name: matrix_exp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::matrix_exp(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: matrix_exp_backward + operator_name: matrix_exp_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _aminmax + operator_name: _aminmax + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_aminmax(Tensor self) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + 
name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _aminmax + operator_name: _aminmax + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _compute_linear_combination + operator_name: _compute_linear_combination + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_compute_linear_combination(Tensor input, Tensor coefficients) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: coefficients + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: coefficients + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _compute_linear_combination_out + operator_name: _compute_linear_combination + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_compute_linear_combination.out(Tensor input, Tensor coefficients, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: coefficients + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: coefficients + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max + operator_name: max + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_out + operator_name: max + overload_name: dim_max + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: max + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: max + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: max + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: max_values + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max + operator_name: max + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_out + operator_name: max + overload_name: names_dim_max + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: max + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: max + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: max_values + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: max + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: max_values + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: value_selecting_reduction_backward + operator_name: value_selecting_reduction_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::value_selecting_reduction_backward(Tensor grad, int dim, Tensor indices, int[] sizes, bool keepdim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: amax + operator_name: amax + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) 
-> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: amax_out + operator_name: amax + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool1d_with_indices + operator_name: max_pool1d_with_indices + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool1d + operator_name: max_pool1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + 
default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d + operator_name: max_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + 
has_math_kernel: false +- name: mkldnn_max_pool2d + operator_name: mkldnn_max_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_max_pool3d + operator_name: mkldnn_max_pool3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: 
false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_max_pool1d + operator_name: quantized_max_pool1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantized_max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 1 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 1 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 1 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_max_pool2d + operator_name: quantized_max_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> 
Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d + operator_name: max_pool3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 
3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean + operator_name: mean + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean + operator_name: mean + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mean.dim(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean_out + operator_name: mean + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mean.out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean + operator_name: mean + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mean_out + operator_name: mean + overload_name: names_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, DimnameList, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median + operator_name: median + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median_out + operator_name: median + overload_name: dim_values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median + operator_name: median + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median_out + operator_name: median + overload_name: names_dim_values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min + operator_name: min + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min_out + operator_name: min + overload_name: dim_min + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + field_name: values + is_nullable: false + name: min + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: min + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: min + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: min_indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min + operator_name: min + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min_out + operator_name: min + overload_name: names_dim_min + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + field_name: values + is_nullable: false + name: min + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: min + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: min_indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: min + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: min_indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: amin + operator_name: amin + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: amin_out + operator_name: amin + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_convolution + operator_name: mkldnn_convolution + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_convolution(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] stride, int[] dilation, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: 
mkldnn_convolution_backward_input + operator_name: mkldnn_convolution_backward_input + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: self_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_defined + type: bool + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: self_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_defined + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_convolution_backward_weights + operator_name: mkldnn_convolution_backward_weights + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_convolution_backward_weights(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool bias_defined) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + 
dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_defined + type: bool + schema_order_cpp_signature: std::tuple (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_defined + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_convolution_backward + operator_name: mkldnn_convolution_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + 
name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_batch_norm + operator_name: miopen_batch_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_batch_norm(Tensor input, Tensor weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float exponential_average_factor, float epsilon) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: exponential_average_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, bool, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: exponential_average_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_batch_norm_backward + operator_name: miopen_batch_norm_backward + overload_name: '' + 
use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_batch_norm_backward(Tensor input, Tensor grad_output, Tensor weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? save_var, float epsilon) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: epsilon + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution + operator_name: miopen_convolution + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_backward_input + operator_name: miopen_convolution_backward_input + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: self_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t 
+ is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: self_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_backward + operator_name: miopen_convolution_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: 
Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_backward_bias + operator_name: miopen_convolution_backward_bias + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_backward_bias(Tensor grad_output) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_backward_weight + operator_name: miopen_convolution_backward_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &, const Tensor &, 
IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_transpose + operator_name: miopen_convolution_transpose + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_transpose(Tensor self, Tensor weight, Tensor? bias, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + 
is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_transpose_backward + operator_name: miopen_convolution_transpose_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_transpose_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] output_padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + 
dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_transpose_backward_input + operator_name: miopen_convolution_transpose_backward_input + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_transpose_backward_input(Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_convolution_transpose_backward_weight + operator_name: miopen_convolution_transpose_backward_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_convolution_transpose_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, 
bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_depthwise_convolution + operator_name: miopen_depthwise_convolution + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_depthwise_convolution_backward_input + operator_name: miopen_depthwise_convolution_backward_input + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_depthwise_convolution_backward_input(int[] self_size, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: self_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: 
null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: self_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_depthwise_convolution_backward + operator_name: miopen_depthwise_convolution_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_depthwise_convolution_backward(Tensor self, Tensor grad_output, Tensor weight, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output 
+ type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_depthwise_convolution_backward_weight + operator_name: miopen_depthwise_convolution_backward_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_depthwise_convolution_backward_weight(int[] weight_size, Tensor grad_output, Tensor self, int[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + schema_order_cpp_signature: Tensor (IntArrayRef, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weight_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t 
+ - annotation: null + dynamic_type: bool + is_nullable: false + name: benchmark + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: deterministic + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_rnn + operator_name: miopen_rnn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_rnn(Tensor input, Tensor[] weight, int weight_stride0, Tensor hx, Tensor? cx, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: dropout_state + type: const c10::optional& + schema_order_cpp_signature: std::tuple (const Tensor &, TensorList, int64_t, const Tensor &, const c10::optional&, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + 
dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: dropout_state + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + - dynamic_type: Tensor + name: result4 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: miopen_rnn_backward + operator_name: miopen_rnn_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::miopen_rnn_backward(Tensor input, Tensor[] weight, int weight_stride0, Tensor weight_buf, Tensor hx, Tensor? cx, Tensor output, Tensor? grad_output, Tensor? grad_hy, Tensor? grad_cy, int mode, int hidden_size, int num_layers, bool batch_first, float dropout, bool train, bool bidirectional, int[] batch_sizes, Tensor? dropout_state, Tensor reserve, bool[4] output_mask) -> (Tensor, Tensor, Tensor, Tensor[]) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight_buf + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_output + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: 
dropout_state + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: reserve + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple> (const Tensor &, TensorList, int64_t, const Tensor &, const Tensor &, const c10::optional&, const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, int64_t, int64_t, int64_t, bool, double, bool, bool, IntArrayRef, const c10::optional&, const Tensor &, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: weight + type: TensorList + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: weight_stride0 + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight_buf + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: cx + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_output + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: mode + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: hidden_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: batch_sizes + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: dropout_state + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: reserve + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: TensorList + name: result3 + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mm + operator_name: mm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mm(Tensor self, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + 
name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mm_out + operator_name: mm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_mm + operator_name: _sparse_mm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_mm(Tensor sparse, Tensor dense) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sparse + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: dense + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sparse + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: dense + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mode + operator_name: mode + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + 
default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple<Tensor,Tensor> (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mode_out + operator_name: mode + overload_name: values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple<Tensor &,Tensor &> (const Tensor &, int64_t, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b!
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mode + operator_name: mode + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mode.dimname(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple<Tensor,Tensor> (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mode_out + operator_name: mode + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mode.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple<Tensor &,Tensor &> (const Tensor &, Dimname, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b!
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul + operator_name: mul + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mul.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul_ + operator_name: mul_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul_out + operator_name: mul + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul + operator_name: mul + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mul.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mul_ + operator_name: mul_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multiply + operator_name: multiply + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multiply_ + operator_name: multiply_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multiply_out + operator_name: multiply + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multiply + operator_name: multiply + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multiply_ + operator_name: multiply_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mv + operator_name: mv + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mv(Tensor self, Tensor vec) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mv_out + operator_name: mv + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mvlgamma + operator_name: mvlgamma + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mvlgamma(Tensor self, int p) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mvlgamma_ + operator_name: mvlgamma_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: p + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: narrow_copy + operator_name: narrow_copy + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::narrow_copy(Tensor self, int dim, int start, int length) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: narrow + operator_name: narrow + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::narrow(Tensor(a) self, int dim, int start, int length) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: narrow + operator_name: narrow + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, int length) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: start + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: start + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: length + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_batch_norm + operator_name: native_batch_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: std::tuple (const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, bool, double, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: 
+ - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_batch_norm_out + operator_name: native_batch_norm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_batch_norm.out(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!) save_invstd) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: save_mean + output: true + type: Tensor & + - allocate: true + annotation: c! + dynamic_type: Tensor + is_nullable: false + name: save_invstd + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, bool, double, double, Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: save_mean + output: true + type: Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: Tensor + is_nullable: false + name: save_invstd + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + - dynamic_type: Tensor + name: save_mean + type: Tensor & + - dynamic_type: Tensor + name: save_invstd + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_stats + operator_name: batch_norm_stats + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_stats(Tensor input, float eps) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: std::tuple<Tensor,Tensor> (const Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_elemt + operator_name: batch_norm_elemt + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_elemt(Tensor input, Tensor? weight, Tensor?
bias, Tensor mean, Tensor invstd, float eps) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: Tensor (const Tensor &, const c10::optional&, const c10::optional&, const Tensor &, const Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_elemt_out + operator_name: batch_norm_elemt + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_elemt.out(Tensor input, Tensor? weight, Tensor? bias, Tensor mean, Tensor invstd, float eps, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, double, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_gather_stats + operator_name: batch_norm_gather_stats + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_gather_stats(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? 
running_var, float momentum, float eps, int count) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: count + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, double, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: count + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_gather_stats_with_counts + operator_name: batch_norm_gather_stats_with_counts + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_gather_stats_with_counts(Tensor input, Tensor mean, Tensor invstd, Tensor? running_mean, Tensor? 
running_var, float momentum, float eps, Tensor counts) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: counts + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, double, double, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: counts + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_batch_norm_backward + operator_name: native_batch_norm_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_batch_norm_backward(Tensor grad_out, Tensor input, Tensor? weight, Tensor? running_mean, Tensor? running_var, Tensor? save_mean, Tensor? 
save_invstd, bool train, float eps, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_invstd + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, const c10::optional&, bool, double, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: save_invstd + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_backward_reduce + operator_name: batch_norm_backward_reduce + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_backward_reduce(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, bool input_g, bool weight_g, bool bias_g) -> (Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: input_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: weight_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_g + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: bool + is_nullable: false + name: input_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: weight_g + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bias_g + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_backward_elemt + operator_name: batch_norm_backward_elemt + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_backward_elemt(Tensor grad_out, Tensor input, Tensor mean, Tensor invstd, Tensor? 
weight, Tensor mean_dy, Tensor mean_dy_xmu) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean_dy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean_dy_xmu + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: invstd + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean_dy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean_dy_xmu + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: batch_norm_update_stats + operator_name: batch_norm_update_stats + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::batch_norm_update_stats(Tensor input, Tensor? running_mean, Tensor? 
running_var, float momentum) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + schema_order_cpp_signature: std::tuple (const Tensor &, const c10::optional&, const c10::optional&, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_mean + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: running_var + type: const c10::optional& + - annotation: null + dynamic_type: double + is_nullable: false + name: momentum + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_vulkan_available + operator_name: is_vulkan_available + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_vulkan_available() -> bool + arguments: [] + schema_order_cpp_signature: bool () + schema_order_arguments: [] + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nnpack_available + operator_name: _nnpack_available + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_nnpack_available() -> bool + arguments: [] + schema_order_cpp_signature: bool () + schema_order_arguments: [] + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nnpack_spatial_convolution + operator_name: _nnpack_spatial_convolution + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_nnpack_spatial_convolution(Tensor input, Tensor weight, Tensor? 
bias, int[2] padding, int[2] stride=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nnpack_spatial_convolution_backward + operator_name: _nnpack_spatial_convolution_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_nnpack_spatial_convolution_backward(Tensor input, Tensor grad_output, Tensor weight, int[2] padding, bool[3] output_mask) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + 
device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nnpack_spatial_convolution_backward_input + operator_name: _nnpack_spatial_convolution_backward_input + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_nnpack_spatial_convolution_backward_input(Tensor input, Tensor grad_output, Tensor weight, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nnpack_spatial_convolution_backward_weight + operator_name: _nnpack_spatial_convolution_backward_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_nnpack_spatial_convolution_backward_weight(Tensor input, int[] weightsize, Tensor grad_output, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weightsize + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: weightsize + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ones + operator_name: ones + overload_name: names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: ones + operator_name: ones + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ones(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ones_out + operator_name: ones + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ones.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + schema_order_cpp_signature: Tensor & (IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ones_like + operator_name: ones_like + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
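The `ones` declarations above (the named, default, and `out` overloads) are typical of the factory functions the binding generator turns into Go constructors, with the `TensorOptions` argument flattened into a kind/device pair. A minimal usage sketch, assuming the usual gotch `Must*` naming and the `(kind, device)` convention; the exact generated signature is an assumption here, not taken from the source:

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Assumed generated factory: size plus (kind, device) standing in for TensorOptions.
	x := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)
	defer x.MustDrop()

	fmt.Println(x.Float64Values()) // [1 1 1 1 1 1]
}
```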
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pairwise_distance + operator_name: pairwise_distance + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: 1.0e-06 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double, double, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: 1.0e-06 + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cdist + operator_name: cdist + 
overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: compute_mode + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: compute_mode + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _euclidean_dist + operator_name: _euclidean_dist + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_euclidean_dist(Tensor x1, Tensor x2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cdist_forward + operator_name: _cdist_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? 
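`cdist` above takes a nullable `int? compute_mode`, which is exactly the kind of optional scalar argument that needs special treatment at the C boundary. The sketch below is a hypothetical, self-contained illustration of the "value plus null flag" convention such an argument can be lowered to; the type and helper names are illustrative only and are not the generated gotch API:

```go
package main

import "fmt"

// optInt64 models a nullable int64 argument such as cdist's compute_mode:
// when null is set, the value is ignored on the C++ side.
type optInt64 struct {
	val  int64
	null bool
}

// asCArgs flattens the option into the scalar pair a C wrapper can receive:
// the raw value and a flag telling the callee whether it is actually present.
func (o optInt64) asCArgs() (int64, uint8) {
	if o.null {
		return 0, 1
	}
	return o.val, 0
}

func main() {
	// compute_mode = None: let the kernel pick its default strategy.
	v, isNull := optInt64{null: true}.asCArgs()
	fmt.Println(v, isNull) // 0 1

	// compute_mode = 1: an explicit value is forwarded with the null flag cleared.
	v, isNull = optInt64{val: 1}.asCArgs()
	fmt.Println(v, isNull) // 1 0
}
```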
compute_mode) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: compute_mode + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: compute_mode + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cdist_backward + operator_name: _cdist_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cdist_backward(Tensor grad, Tensor x1, Tensor x2, float p, Tensor cdist) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cdist + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, double, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cdist + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pdist + operator_name: pdist + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pdist(Tensor self, float p=2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + schema_order_cpp_signature: Tensor (const Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + 
name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _pdist_forward + operator_name: _pdist_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_pdist_forward(Tensor self, float p=2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + schema_order_cpp_signature: Tensor (const Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: p + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _pdist_backward + operator_name: _pdist_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_pdist_backward(Tensor grad, Tensor self, float p, Tensor pdist) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: pdist + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: pdist + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cosine_similarity + operator_name: cosine_similarity + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + 
is_nullable: false + name: dim + type: int64_t + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: eps + type: double + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: 1.0e-08 + dynamic_type: double + is_nullable: false + name: eps + type: double + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: permute + operator_name: permute + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::permute(Tensor(a) self, int[] dims) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dims + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dims + type: IntArrayRef + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: movedim + operator_name: movedim + overload_name: intlist + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: source + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: destination + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: source + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: destination + type: IntArrayRef + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: movedim + operator_name: movedim + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: source + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: destination + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: source + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: destination + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: numpy_T + operator_name: numpy_T + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::numpy_T(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pixel_shuffle + operator_name: pixel_shuffle + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: upscale_factor + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: upscale_factor + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: channel_shuffle + operator_name: channel_shuffle + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::channel_shuffle(Tensor self, int groups) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + 
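`permute` and the two `movedim` overloads above are plain Tensor methods (method_of includes Tensor), so they surface as methods on the Go tensor type. A short sketch of `permute`, assuming the generated `Must*` method with the usual trailing `del` flag; the method name and flag position are assumptions:

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	x := ts.MustOnes([]int64{2, 3, 4}, gotch.Float, gotch.CPU)

	// Reorder dimensions (N, C, L) -> (L, N, C); del=true releases the input tensor.
	y := x.MustPermute([]int64{2, 0, 1}, true)
	defer y.MustDrop()

	fmt.Println(y.MustSize()) // [4 2 3]
}
```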
dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_pinned + operator_name: is_pinned + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_pinned(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pin_memory + operator_name: pin_memory + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pin_memory(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pinverse + operator_name: pinverse + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1.0e-15 + dynamic_type: double + is_nullable: false + name: rcond + type: double + schema_order_cpp_signature: Tensor (const Tensor &, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1.0e-15 + dynamic_type: double + is_nullable: false + name: rcond + type: double + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: poisson_nll_loss + operator_name: poisson_nll_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::poisson_nll_loss(Tensor input, Tensor target, bool log_input, bool full, float eps, int reduction) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + 
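`pinverse` above exposes the Moore-Penrose pseudo-inverse with a default `rcond` of 1e-15. A sketch of the expected call shape, with the generated method name and argument order assumed rather than confirmed:

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// A 3x2 matrix; its pseudo-inverse has the transposed shape 2x3.
	a := ts.MustRand([]int64{3, 2}, gotch.Float, gotch.CPU)

	// rcond cuts off small singular values; del=true releases `a` after the call.
	aPinv := a.MustPinverse(1e-15, true)
	defer aPinv.MustDrop()

	fmt.Println(aPinv.MustSize()) // [2 3]
}
```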
is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: log_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: full + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool, bool, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: log_input + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: full + type: bool + - annotation: null + dynamic_type: double + is_nullable: false + name: eps + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rad2deg + operator_name: rad2deg + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rad2deg(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rad2deg_ + operator_name: rad2deg_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rad2deg_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rad2deg_out + operator_name: rad2deg + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rad2deg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: deg2rad + operator_name: deg2rad + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::deg2rad(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: deg2rad_ + operator_name: deg2rad_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::deg2rad_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: deg2rad_out + operator_name: deg2rad + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::deg2rad.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
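`rad2deg` and `deg2rad` each come in a functional, an in-place (`_`), and an `out` variant, as the declarations above show. A sketch of the functional and in-place forms; the constructor and the generated method names (including the trailing underscore for the in-place variant) are assumptions:

```go
package main

import (
	"fmt"
	"math"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	rad := ts.MustOfSlice([]float64{0, math.Pi / 2, math.Pi})

	// Functional form returns a new tensor; del=false keeps `rad` alive.
	deg := rad.MustRad2deg(false)
	fmt.Println(deg.Float64Values()) // [0 90 180]

	// In-place form mutates the receiver, mirroring aten::deg2rad_.
	deg.MustDeg2rad_()
	fmt.Println(deg.Float64Values()) // [0 1.5707... 3.1415...]

	rad.MustDrop()
	deg.MustDrop()
}
```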
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scalar_tensor + operator_name: scalar_tensor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: s + type: Scalar + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (Scalar, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: s + type: Scalar + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rand + operator_name: rand + overload_name: names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rand.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: rand + operator_name: rand + overload_name: generator_with_names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rand.generator_with_names(int[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: rand + operator_name: rand + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rand(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rand + operator_name: rand + overload_name: generator + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rand.generator(int[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rand_out + operator_name: rand + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rand.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + schema_order_cpp_signature: Tensor & (IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rand_out + operator_name: rand + overload_name: generator_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rand.generator_out(int[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (IntArrayRef, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rand_like + operator_name: rand_like + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
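The `rand` family above (default, `generator`, `out`, and named overloads) draws uniform samples in [0, 1). A sketch of the basic factory, again assuming the `(kind, device)` convention in place of `TensorOptions` and the `Must*` naming:

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Uniform samples in [0, 1) with shape 2x4 on CPU.
	x := ts.MustRand([]int64{2, 4}, gotch.Float, gotch.CPU)
	defer x.MustDrop()

	// A mean near 0.5 is a quick sanity check on the distribution.
	mean := x.MustMean(gotch.Float, false)
	defer mean.MustDrop()
	fmt.Println(mean.Float64Values())
}
```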
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: randint + operator_name: randint + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::randint(int high, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
[auto-generated content, line structure lost in extraction: this portion of the diff adds the PyTorch v1.7.0 Declarations YAML entries (ATen operator schemas) covering randint (base, generator, low, low_generator overloads and their randint_out variants), randint_like (base and low_dtype), randn (base, generator, names, generator_with_names overloads and randn_out variants), randn_like, randperm (base, generator, and randperm_out variants), range (step, base, and range_out), reciprocal / reciprocal_ / reciprocal_out, neg / neg_ / neg_out, negative / negative_ / negative_out, repeat, repeat_interleave (Tensor, self_Tensor, self_int overloads), reshape, _mkldnn_reshape, reshape_as, round / round_ / round_out, and rrelu / rrelu_. Each entry records the aten schema string, the argument list (name, dynamic_type, is_nullable, kwarg_only, defaults such as c10::nullopt for optional dtype/layout/device/pin_memory or '{}' for TensorOptions), the schema_order_cpp_signature, the schema_order_arguments, and metadata flags (method_of, mode, is_factory_method, inplace, abstract, device_guard, with_gil, deprecated, has_math_kernel). These declarations are the input consumed by the binding generator to produce the wrapped tensor APIs.]
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0.125 + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + default: 0.3333333333333333 + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: relu + operator_name: relu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::relu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: relu_ + operator_name: relu_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::relu_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: prelu + operator_name: prelu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::prelu(Tensor self, Tensor weight) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: prelu_backward + operator_name: prelu_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::prelu_backward(Tensor grad_output, Tensor self, Tensor weight) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gelu + operator_name: gelu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gelu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: 
true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gelu_backward + operator_name: gelu_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gelu_backward(Tensor grad, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: infinitely_differentiable_gelu_backward + operator_name: infinitely_differentiable_gelu_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::infinitely_differentiable_gelu_backward(Tensor grad, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardshrink + operator_name: hardshrink + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.5 + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.5 + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardshrink_backward + operator_name: hardshrink_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + 
schema_string: aten::hardshrink_backward(Tensor grad_out, Tensor self, Scalar lambd) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_out + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsqrt + operator_name: rsqrt + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rsqrt(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsqrt_ + operator_name: rsqrt_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rsqrt_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsqrt_out + operator_name: rsqrt + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: select + operator_name: select + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: select + operator_name: select + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::select.int(Tensor(a) self, int dim, int index) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: select_backward + operator_name: select_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::select_backward(Tensor grad, int[] input_sizes, int dim, int index) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: 
int64_t + is_nullable: false + name: index + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: selu + operator_name: selu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::selu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: selu_ + operator_name: selu_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::selu_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: celu + operator_name: celu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1.0 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1.0 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: celu_ + operator_name: celu_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1.0 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1.0 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu + operator_name: silu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::silu(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu_ + operator_name: silu_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::silu_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu_out + operator_name: silu + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: silu_backward + operator_name: silu_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sigmoid + operator_name: sigmoid + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sigmoid(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- 
name: sigmoid_ + operator_name: sigmoid_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sigmoid_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sigmoid_out + operator_name: sigmoid + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit + operator_name: logit + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logit(Tensor self, float? eps=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit_ + operator_name: logit_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: logit_out + operator_name: logit + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: eps + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sin + operator_name: sin + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sin(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sin_ + operator_name: sin_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sin_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sin_out + operator_name: sin + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinh + operator_name: sinh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sinh(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinh_ + operator_name: sinh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sinh_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sinh_out + operator_name: sinh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: detach + operator_name: detach + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::detach(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: detach_ + operator_name: detach_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::detach_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: size + operator_name: size + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::size.int(Tensor self, int dim) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: int64_t (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: size + operator_name: size + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::size.Dimname(Tensor self, Dimname dim) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: int64_t (const Tensor &, Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice + operator_name: slice + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slice.Tensor(Tensor(a) self, int dim=0, int start=0, int end=9223372036854775807, int step=1) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + default: 9223372036854775807 + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - 
annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + default: 9223372036854775807 + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slice_backward + operator_name: slice_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slice_backward(Tensor grad, int[] input_sizes, int dim, int start, int end, int step) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, int64_t, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: start + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: end + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: slogdet + operator_name: slogdet + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slogdet(Tensor self) -> (Tensor sign, Tensor logabsdet) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: sign + name: sign + type: Tensor + - dynamic_type: Tensor + field_name: logabsdet + name: logabsdet + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + 
with_gil: false + deprecated: false + has_math_kernel: false +- name: smm + operator_name: smm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::smm(Tensor self, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softmax + operator_name: softmax + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softmax + operator_name: softmax + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _softmax + operator_name: _softmax + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _softmax_backward_data + operator_name: _softmax_backward_data + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + 
dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsafe_split + operator_name: unsafe_split + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unsafe_split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[] + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::vector (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: split + operator_name: split + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::split.Tensor(Tensor(a) self, int split_size, int dim=0) -> Tensor(a)[] + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::vector (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: split_size + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsafe_split_with_sizes + operator_name: unsafe_split_with_sizes + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unsafe_split_with_sizes(Tensor self, int[] split_sizes, int dim=0) -> Tensor[] + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: split_sizes + type: IntArrayRef + - annotation: null + 
default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::vector (const Tensor &, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: split_sizes + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: split_with_sizes + operator_name: split_with_sizes + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::split_with_sizes(Tensor(a) self, int[] split_sizes, int dim=0) -> Tensor(a)[] + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: split_sizes + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::vector (const Tensor &, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: split_sizes + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze + operator_name: squeeze + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::squeeze(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze + operator_name: squeeze + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: a + 
dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze + operator_name: squeeze + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: Tensor (const Tensor &, Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze_ + operator_name: squeeze_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::squeeze_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze_ + operator_name: squeeze_ + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: squeeze_ + operator_name: squeeze_ + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: Tensor & (Tensor &, Dimname) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: sspaddmm + operator_name: sspaddmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sspaddmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sspaddmm_out + operator_name: sspaddmm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: 
true + schema_string: aten::sspaddmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: stack + operator_name: stack + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::stack(Tensor[] tensors, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (TensorList, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: stack_out + operator_name: stack + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (TensorList, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hstack + operator_name: hstack + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hstack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hstack_out + operator_name: hstack + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor & (TensorList, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: vstack + operator_name: vstack + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::vstack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: vstack_out + operator_name: vstack + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor & (TensorList, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dstack + operator_name: dstack + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dstack(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dstack_out + operator_name: dstack + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor & (TensorList, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: stft + operator_name: stft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: window + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: return_complex + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional, c10::optional, const c10::optional&, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: window + type: const c10::optional& + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: return_complex + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: 
istft + operator_name: istft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: window + type: const c10::optional& + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: center + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: length + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_complex + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional, c10::optional, const c10::optional&, bool, bool, c10::optional, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_fft + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: hop_length + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: win_length + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: window + type: const c10::optional& + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: center + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + name: onesided + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: length + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_complex + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: stride + operator_name: stride + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::stride.int(Tensor self, int dim) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + 
name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: int64_t (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: stride + operator_name: stride + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::stride.Dimname(Tensor self, Dimname dim) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: int64_t (const Tensor &, Dimname) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum + operator_name: sum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum + operator_name: sum + overload_name: dim_IntList + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum + operator_name: sum + overload_name: dim_DimnameList + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sum.dim_DimnameList(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum_out + operator_name: sum + overload_name: IntList_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum_out + operator_name: sum + overload_name: DimnameList_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sum.DimnameList_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, DimnameList, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nansum + operator_name: nansum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nansum(Tensor self, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nansum + operator_name: nansum + overload_name: dim_IntList + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nansum.dim_IntList(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nansum_out + operator_name: nansum + overload_name: IntList_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nansum.IntList_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sum_to_size + operator_name: sum_to_size + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sum_to_size(Tensor self, int[] size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: sqrt + operator_name: sqrt + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sqrt(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sqrt_ + operator_name: sqrt_ + overload_name: '' + 
use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sqrt_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sqrt_out + operator_name: sqrt + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: square + operator_name: square + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::square(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: square_ + operator_name: square_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::square_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std + operator_name: std + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std(Tensor self, bool unbiased=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std + operator_name: std + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std_mean + operator_name: std_mean + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool) + schema_order_arguments: + - 
annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std_mean + operator_name: std_mean + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std_mean + operator_name: std_mean + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: 
native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std_out + operator_name: std + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std + operator_name: std + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: std_out + operator_name: std + overload_name: names_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, DimnameList, bool, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
[Declarations, condensed, continued:]

- aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor
- aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
- aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
- aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
- aten::t(Tensor(a) self) -> Tensor(a)
- aten::t_(Tensor(a!) self) -> Tensor(a!)
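The prod declarations above are typical of the reduction schemas the binding generator consumes: a keyword-only `ScalarType? dtype`, an optional `keepdim`, and parallel `dim_int`/`Dimname`/`out` overloads. As a rough sketch of how such a schema tends to surface on the Go side, the snippet below calls assumed wrappers `MustProd`/`MustProdDimInt` with a trailing `del` ownership flag; those names, their parameter order, and the `MustOnes` helper are illustrative assumptions, not something this hunk guarantees.

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// 2x3 tensor of ones (MustOnes: assumed factory helper for this sketch).
	x := ts.MustOnes([]int64{2, 3}, gotch.Float, gotch.CPU)

	// aten::prod reduces over all elements; the optional `ScalarType? dtype`
	// is passed as a gotch.DType in this sketch.
	all := x.MustProd(gotch.Float, false)

	// aten::prod.dim_int reduces one dimension; keepdim=true keeps it as size 1.
	perRow := x.MustProdDimInt(1, true, gotch.Float, false)

	fmt.Println(all.Float64Values(), perRow.MustSize()) // [6] and [2 1]

	perRow.MustDrop()
	all.MustDrop()
	x.MustDrop()
}
```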
[Declarations, condensed, continued:]

- aten::tan(Tensor self) -> Tensor
- aten::tan_(Tensor(a!) self) -> Tensor(a!)
- aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- aten::tanh(Tensor self) -> Tensor
- aten::tanh_(Tensor(a!) self) -> Tensor(a!)
- aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor
- aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor
- aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)
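The tan/tanh entries follow the standard ATen triple: a pure function, an in-place `_` variant that mutates `Tensor(a!) self` (note `inplace: true` in the YAML), and an `.out` variant that writes into a caller-supplied tensor. A small sketch of how the first two typically look from Go follows; `MustTanh`, `MustTanh_`, and the `MustOnes` helper are assumed wrapper names, not taken from this hunk.

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	x := ts.MustOnes([]int64{3}, gotch.Float, gotch.CPU)

	// aten::tanh(Tensor self) -> Tensor: returns a new tensor, x is untouched.
	y := x.MustTanh(false)

	// aten::tanh_(Tensor(a!) self) -> Tensor(a!): mutates x in place,
	// mirroring the `inplace: true` flag in the declaration above.
	x.MustTanh_()

	fmt.Println(y.Float64Values(), x.Float64Values()) // both ~0.7616

	y.MustDrop()
	x.MustDrop()
}
```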
[Declarations, condensed, continued:]

- aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)
- aten::threshold_backward(Tensor grad_output, Tensor self, Scalar threshold) -> Tensor
- aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
- aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
- aten::_mkldnn_transpose(Tensor self, int dim0, int dim1) -> Tensor
- aten::transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
- aten::_mkldnn_transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
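`transpose.int` is declared with `Tensor(a)` aliasing annotations and `device_guard: false`, i.e. it returns a view of the same storage rather than a copy. A short sketch of the dim-swapping behaviour, assuming a generated `MustTranspose(dim0, dim1, del)` wrapper and a `MustZeros` factory helper (both assumptions for illustration):

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	x := ts.MustZeros([]int64{2, 3, 4}, gotch.Float, gotch.CPU)

	// aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a):
	// swaps two dimensions; del=false keeps the input tensor valid.
	y := x.MustTranspose(0, 2, false)

	fmt.Println(x.MustSize(), y.MustSize()) // [2 3 4] [4 3 2]

	y.MustDrop()
	x.MustDrop()
}
```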
[Declarations, condensed, continued:]

- aten::one_hot(Tensor self, int num_classes=-1) -> Tensor
- aten::flip(Tensor self, int[] dims) -> Tensor
- aten::fliplr(Tensor self) -> Tensor
- aten::flipud(Tensor self) -> Tensor
- aten::roll(Tensor self, int[1] shifts, int[1] dims=[]) -> Tensor
- aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor
- aten::trapz.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor
- aten::trapz.dx(Tensor y, *, float dx=1, int dim=-1) -> Tensor
- aten::_trilinear(Tensor i1, Tensor i2, Tensor i3, int[] expand1, int[] expand2, int[] expand3, int[] sumdim, int unroll_dim=1) -> Tensor
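`one_hot` defaults `num_classes` to -1, meaning the class count is inferred from the data; passing it explicitly fixes the width of the encoding. A minimal sketch with an int64 index tensor, assuming `MustOfSlice` and `MustOneHot(numClasses, del)` as the generated helpers (both names are assumptions here):

```go
package main

import (
	"fmt"

	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Class indices as int64 values (MustOfSlice: assumed constructor).
	labels := ts.MustOfSlice([]int64{0, 2, 1})

	// aten::one_hot(Tensor self, int num_classes=-1) -> Tensor:
	// an explicit class count of 3; -1 would infer it from the data.
	oneHot := labels.MustOneHot(3, false)

	fmt.Println(oneHot.MustSize()) // [3 3]

	oneHot.MustDrop()
	labels.MustDrop()
}
```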
[Declarations, condensed, continued:]

- aten::triplet_margin_loss(Tensor anchor, Tensor positive, Tensor negative, float margin=1.0, float p=2, float eps=1e-06, bool swap=False, int reduction=Mean) -> Tensor
- aten::trunc(Tensor self) -> Tensor
- aten::trunc_(Tensor(a!) self) -> Tensor(a!)
- aten::trunc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
- aten::fix(Tensor self) -> Tensor
- aten::fix_(Tensor(a!) self) -> Tensor(a!)
- aten::fix.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
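`triplet_margin_loss` carries several defaulted scalars (`margin`, `p`, `eps`), a `swap` flag, and an integer `reduction` whose `Mean` default corresponds to the value 1. A hedged sketch of calling it from Go; the wrapper name `MustTripletMarginLoss`, its flattened parameter order, and the `MustRand` helper are assumptions that simply mirror the schema rather than anything confirmed by this hunk.

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Random embeddings for a batch of 4 triplets (MustRand: assumed factory).
	anchor := ts.MustRand([]int64{4, 8}, gotch.Float, gotch.CPU)
	positive := ts.MustRand([]int64{4, 8}, gotch.Float, gotch.CPU)
	negative := ts.MustRand([]int64{4, 8}, gotch.Float, gotch.CPU)

	// aten::triplet_margin_loss(anchor, positive, negative,
	//   margin=1.0, p=2, eps=1e-06, swap=False, reduction=Mean) -> Tensor
	// reduction is an int64 in the schema; 1 corresponds to Mean.
	loss := anchor.MustTripletMarginLoss(positive, negative, 1.0, 2.0, 1e-6, false, 1, false)

	fmt.Println(loss.Float64Values()[0])

	loss.MustDrop()
	negative.MustDrop()
	positive.MustDrop()
	anchor.MustDrop()
}
```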
[Declarations, condensed, continued:]

- aten::type_as(Tensor self, Tensor other) -> Tensor
- aten::_has_compatible_shallow_copy_type(Tensor self, Tensor from) -> bool
- aten::_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)
- aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
- aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)
- aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
- aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
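The `_unique`/`unique_dim`/`unique_consecutive` family returns tensor tuples, which a Go binding naturally surfaces as multiple return values. A sketch using the `unique_dim` schema; `MustUniqueDim`, its argument order, and the `MustOfSlice`/`Int64Values` helpers are assumed for illustration.

```go
package main

import (
	"fmt"

	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	vals := ts.MustOfSlice([]int64{3, 1, 3, 2, 1})

	// aten::unique_dim(Tensor self, int dim, bool sorted=True,
	//   bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)
	// The three-tensor return maps to three Go return values in this sketch.
	uniq, inverse, counts := vals.MustUniqueDim(0, true, true, true, false)

	fmt.Println(uniq.Int64Values())    // unique values along dim 0
	fmt.Println(inverse.Int64Values()) // index of each input in uniq
	fmt.Println(counts.Int64Values())  // occurrence count per unique value

	counts.MustDrop()
	inverse.MustDrop()
	uniq.MustDrop()
	vals.MustDrop()
}
```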
dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_inverse + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: return_counts + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _unsafe_view + operator_name: _unsafe_view + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_unsafe_view(Tensor self, int[] size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsqueeze + operator_name: unsqueeze + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: unsqueeze_ + operator_name: unsqueeze_ + overload_name: '' + use_c10_dispatcher: full + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unsqueeze_(Tensor(a!) self, int dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: vander + operator_name: vander + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::vander(Tensor x, int? N=None, bool increasing=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: N + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: increasing + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: N + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: increasing + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var + operator_name: var + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var(Tensor self, bool unbiased=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var + operator_name: var + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var.dim(Tensor self, int[1] dim, bool unbiased=True, bool 
keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var_out + operator_name: var + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var.out(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var + operator_name: var + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var_out + operator_name: var + overload_name: names_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, DimnameList, bool, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var_mean + operator_name: var_mean + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var_mean + operator_name: var_mean + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var_mean.dim(Tensor self, int[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: var_mean + operator_name: var_mean + overload_name: names_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + 
name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, DimnameList, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: view_as + operator_name: view_as + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: where + operator_name: where + overload_name: self + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + 
deprecated: false + has_math_kernel: false +- name: where + operator_name: where + overload_name: ScalarSelf + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: where + operator_name: where + overload_name: ScalarOther + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: where + operator_name: where + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other 
+ type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: where + operator_name: where + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::where(Tensor condition) -> Tensor[] + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + schema_order_cpp_signature: std::vector (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _s_where + operator_name: _s_where + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_s_where(Tensor condition, Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: condition + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm_except_dim + operator_name: norm_except_dim + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm_except_dim(Tensor v, int pow=2, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: v + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: pow + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: v + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: pow + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: 
result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_norm + operator_name: _weight_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: g + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: g + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_norm_cuda_interface + operator_name: _weight_norm_cuda_interface + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_weight_norm_cuda_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: g + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: g + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_norm_cuda_interface_backward + operator_name: _weight_norm_cuda_interface_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_weight_norm_cuda_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_w + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_g + type: const Tensor & + - 
annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_norms + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_w + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_g + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_norms + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _weight_norm_differentiable_backward + operator_name: _weight_norm_differentiable_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_w + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_g + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_norms + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_w + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_v + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_g + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: saved_norms + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: zeros + operator_name: zeros + overload_name: names + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: DimnameList + is_nullable: true + kwarg_only: true + name: names + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: zeros + operator_name: zeros + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::zeros(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: zeros_out + operator_name: zeros + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::zeros.out(int[] size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + schema_order_cpp_signature: Tensor & (IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: zeros_like + operator_name: zeros_like + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _standard_gamma_grad + operator_name: _standard_gamma_grad + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _standard_gamma + operator_name: _standard_gamma + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_standard_gamma(Tensor self, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _dirichlet_grad + operator_name: _dirichlet_grad + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_dirichlet_grad(Tensor x, Tensor alpha, Tensor total) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: alpha + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: x + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: alpha + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sample_dirichlet + operator_name: _sample_dirichlet + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sample_dirichlet(Tensor self, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: poisson + operator_name: poisson + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::poisson(Tensor self, Generator? generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: binomial + operator_name: binomial + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::binomial(Tensor count, Tensor prob, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: count + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: prob + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: count + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: prob + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_norm + operator_name: native_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_norm(Tensor self, Scalar p=2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: native_norm + operator_name: native_norm + overload_name: ScalarOpt_dim_dtype + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::native_norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, ScalarType? 
dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_sum + operator_name: _sparse_sum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_sum(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_sum + operator_name: _sparse_sum + overload_name: dtype + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_sum.dtype(Tensor self, *, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor (const Tensor &, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_sum + operator_name: _sparse_sum + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + 
matches_jit_signature: true + schema_string: aten::_sparse_sum.dim(Tensor self, int[1] dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_sum + operator_name: _sparse_sum + overload_name: dim_dtype + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_sum.dim_dtype(Tensor self, int[1] dim, *, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_sum_backward + operator_name: _sparse_sum_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + 
deprecated: false + has_math_kernel: false +- name: _sparse_softmax + operator_name: _sparse_softmax + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_softmax + operator_name: _sparse_softmax + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_softmax + operator_name: _sparse_softmax + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: 
false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_softmax_backward_data + operator_name: _sparse_softmax_backward_data + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_log_softmax + operator_name: _sparse_log_softmax + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_log_softmax.int(Tensor self, int dim, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_log_softmax + operator_name: _sparse_log_softmax + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_log_softmax + operator_name: _sparse_log_softmax + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_log_softmax(Tensor self, int dim, bool half_to_float) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: bool + is_nullable: false + name: half_to_float + type: bool + method_of: + - Type + - namespace + mode: native + 
python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_log_softmax_backward_data + operator_name: _sparse_log_softmax_backward_data + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_log_softmax_backward_data(Tensor grad_output, Tensor output, int dim, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm + operator_name: norm + overload_name: ScalarOpt_dtype + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.ScalarOpt_dtype(Tensor self, Scalar? 
p, *, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm + operator_name: norm + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm + operator_name: norm + overload_name: ScalarOpt_dim_dtype + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? 
p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, bool, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm + operator_name: norm + overload_name: ScalarOpt_dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm_out + operator_name: norm + overload_name: dtype_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, IntArrayRef, bool, ScalarType, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm_out + operator_name: norm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, IntArrayRef, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm + operator_name: norm + overload_name: names_ScalarOpt_dim_dtype + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, DimnameList, bool, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm + operator_name: norm + overload_name: names_ScalarOpt_dim + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? 
p, Dimname[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, DimnameList, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm_out + operator_name: norm + overload_name: names_dtype_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, DimnameList, bool, ScalarType, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + dynamic_type: ScalarType + is_nullable: false + kwarg_only: true + name: dtype + type: ScalarType + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: norm_out + operator_name: norm + overload_name: names_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, DimnameList, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: true + name: p + type: c10::optional + - annotation: null + dynamic_type: DimnameList + is_nullable: false + name: dim + size: 1 + type: DimnameList + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frobenius_norm + operator_name: frobenius_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::frobenius_norm(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frobenius_norm + operator_name: frobenius_norm + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: frobenius_norm_out + operator_name: frobenius_norm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 1 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nuclear_norm + operator_name: nuclear_norm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nuclear_norm(Tensor self, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nuclear_norm_out + operator_name: nuclear_norm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nuclear_norm.out(Tensor self, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nuclear_norm + operator_name: nuclear_norm + overload_name: dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nuclear_norm.dim(Tensor self, int[2] dim, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nuclear_norm_out + operator_name: nuclear_norm + overload_name: dim_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nuclear_norm.dim_out(Tensor self, int[2] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dim + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: clone + operator_name: clone + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: resize_as_ + operator_name: resize_as_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::resize_as_(Tensor(a!) self, Tensor the_template, *, MemoryFormat? memory_format=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: the_template + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: the_template + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: zero_ + operator_name: zero_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::zero_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sub_out + operator_name: sub + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sub + operator_name: sub + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sub_ + operator_name: sub_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sub_.Tensor(Tensor(a!) 
self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sub + operator_name: sub + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sub_ + operator_name: sub_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: subtract_out + operator_name: subtract + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: subtract + operator_name: subtract + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: subtract_ + operator_name: subtract_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: subtract + operator_name: subtract + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: subtract_ + operator_name: subtract_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsub + operator_name: rsub + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: heaviside_out + operator_name: heaviside + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::heaviside.out(Tensor self, Tensor values, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: heaviside + operator_name: heaviside + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::heaviside(Tensor self, Tensor values) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: heaviside_ + operator_name: heaviside_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::heaviside_(Tensor(a!) self, Tensor values) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rsub + operator_name: rsub + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_addmm + operator_name: _sparse_addmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_addmm(Tensor self, Tensor sparse, Tensor dense, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sparse + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: dense + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sparse + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: dense + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + 
has_math_kernel: false +- name: addmm_out + operator_name: addmm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmm + operator_name: addmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addmm_ + operator_name: addmm_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_coo_tensor + operator_name: sparse_coo_tensor + overload_name: size + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sparse_coo_tensor.size(int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_coo_tensor + operator_name: sparse_coo_tensor + overload_name: indices + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sparse_coo_tensor.indices(Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_coo_tensor + operator_name: sparse_coo_tensor + overload_name: indices_size + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sparse_coo_tensor.indices_size(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_coo_tensor_unsafe + operator_name: _sparse_coo_tensor_unsafe + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _validate_sparse_coo_tensor_args + operator_name: _validate_sparse_coo_tensor_args + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_validate_sparse_coo_tensor_args(Tensor indices, Tensor values, int[] size) -> () + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + schema_order_cpp_signature: void (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_coo_tensor_with_dims + operator_name: _sparse_coo_tensor_with_dims + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_coo_tensor_with_dims(int sparse_dim, int dense_dim, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, int64_t, IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _sparse_coo_tensor_with_dims_and_tensors + operator_name: _sparse_coo_tensor_with_dims_and_tensors + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_sparse_coo_tensor_with_dims_and_tensors(int sparse_dim, int dense_dim, int[] size, Tensor indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, int64_t, IntArrayRef, const Tensor &, const Tensor &, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_resize_ + operator_name: sparse_resize_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sparse_resize_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, IntArrayRef, int64_t, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_resize_and_clear_ + operator_name: sparse_resize_and_clear_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sparse_resize_and_clear_(Tensor(a!) self, int[] size, int sparse_dim, int dense_dim) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, IntArrayRef, int64_t, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dense_dim + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_mask + operator_name: sparse_mask + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sparse_mask(Tensor self, Tensor mask) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_dense + operator_name: to_dense + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to_dense(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: 
self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_dense_backward + operator_name: to_dense_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to_dense_backward(Tensor grad, Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sparse_dim + operator_name: sparse_dim + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sparse_dim(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _dimI + operator_name: _dimI + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_dimI(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: dense_dim + operator_name: dense_dim + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dense_dim(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + 
schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _dimV + operator_name: _dimV + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_dimV(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _nnz + operator_name: _nnz + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_nnz(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: coalesce + operator_name: coalesce + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::coalesce(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_coalesced + operator_name: is_coalesced + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_coalesced(Tensor self) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: 
false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _indices + operator_name: _indices + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_indices(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _values + operator_name: _values + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_values(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _coalesced_ + operator_name: _coalesced_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: coalesced + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: indices + operator_name: indices + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::indices(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: values + operator_name: values + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::values(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: hspmm_out + operator_name: hspmm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hspmm.out(Tensor mat1, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hspmm + operator_name: hspmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hspmm(Tensor mat1, Tensor mat2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mat2 + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: copy_sparse_to_sparse_ + operator_name: copy_sparse_to_sparse_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::copy_sparse_to_sparse_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unbind + operator_name: unbind + overload_name: int + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[] + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: std::vector (const Tensor &, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unbind + operator_name: unbind + overload_name: Dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[] + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + schema_order_cpp_signature: std::vector (const Tensor &, Dimname) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_sparse + operator_name: to_sparse + overload_name: sparse_dim + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: sparse_dim + type: int64_t + method_of: + - Type + - Tensor + mode: 
native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_sparse + operator_name: to_sparse + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to_sparse(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_mkldnn + operator_name: to_mkldnn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to_mkldnn(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_reorder_conv2d_weight + operator_name: mkldnn_reorder_conv2d_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_reorder_conv2d_weight(Tensor self, int[2] padding=0, int[2] stride=1, int[2] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + 
type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_reorder_conv3d_weight + operator_name: mkldnn_reorder_conv3d_weight + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_reorder_conv3d_weight(Tensor self, int[3] padding=0, int[3] stride=1, int[3] dilation=1, int groups=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: int64_t + is_nullable: false + name: groups + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to_mkldnn_backward + operator_name: to_mkldnn_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantize_per_tensor + operator_name: quantize_per_tensor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: 
true + schema_string: aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor (const Tensor &, double, int64_t, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantize_per_tensor + operator_name: quantize_per_tensor + overload_name: tensors + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scales + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_points + type: const Tensor & + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + schema_order_cpp_signature: std::vector (TensorList, const Tensor &, const Tensor &, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scales + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_points + type: const Tensor & + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantize_per_channel + operator_name: quantize_per_channel + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scales + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: 
zero_points + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scales + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_points + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dequantize + operator_name: dequantize + overload_name: self + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dequantize.self(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dequantize + operator_name: dequantize + overload_name: tensors + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dequantize.tensors(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_scale + operator_name: q_scale + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::q_scale(Tensor self) -> float + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: double (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: double + name: result + type: double + inplace: false + 
is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_zero_point + operator_name: q_zero_point + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::q_zero_point(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_per_channel_scales + operator_name: q_per_channel_scales + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::q_per_channel_scales(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_per_channel_zero_points + operator_name: q_per_channel_zero_points + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::q_per_channel_zero_points(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: q_per_channel_axis + operator_name: q_per_channel_axis + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::q_per_channel_axis(Tensor self) -> int + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: int64_t (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: int64_t + name: result + type: int64_t + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- 
name: int_repr + operator_name: int_repr + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::int_repr(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _make_per_tensor_quantized_tensor + operator_name: _make_per_tensor_quantized_tensor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _make_per_channel_quantized_tensor + operator_name: _make_per_channel_quantized_tensor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_make_per_channel_quantized_tensor(Tensor self, Tensor scale, Tensor zero_point, int axis) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + 
name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: qscheme + operator_name: qscheme + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::qscheme(Tensor self) -> QScheme + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: QScheme (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: QScheme + name: result + type: QScheme + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fake_quantize_per_tensor_affine + operator_name: fake_quantize_per_tensor_affine + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, double, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fake_quantize_per_tensor_affine_backward + operator_name: fake_quantize_per_tensor_affine_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fake_quantize_per_tensor_affine_backward(Tensor grad, Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + 
type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, double, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: zero_point + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fake_quantize_learnable_per_tensor_affine + operator_name: _fake_quantize_learnable_per_tensor_affine + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fake_quantize_learnable_per_tensor_affine_backward + operator_name: _fake_quantize_learnable_per_tensor_affine_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> (Tensor, Tensor, Tensor) + arguments: + - 
annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fake_quantize_per_channel_affine + operator_name: fake_quantize_per_channel_affine + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: 
native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fake_quantize_per_channel_affine_backward + operator_name: fake_quantize_per_channel_affine_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fake_quantize_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fake_quantize_learnable_per_channel_affine + operator_name: _fake_quantize_learnable_per_channel_affine + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_fake_quantize_learnable_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t 
+ schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _fake_quantize_learnable_per_channel_affine_backward + operator_name: _fake_quantize_learnable_per_channel_affine_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_fake_quantize_learnable_per_channel_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: zero_point + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: axis + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_min + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: quant_max + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _choose_qparams_per_tensor + operator_name: 
_choose_qparams_per_tensor + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_choose_qparams_per_tensor(Tensor self, bool reduce_range=False) -> (float, int) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: reduce_range + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: reduce_range + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: double + name: result0 + type: double + - dynamic_type: int64_t + name: result1 + type: int64_t + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _saturate_weight_to_fp16 + operator_name: _saturate_weight_to_fp16 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: choose_qparams_optimized + operator_name: choose_qparams_optimized + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (float, float) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: numel + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_bins + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: ratio + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: bit_width + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, int64_t, double, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: numel + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n_bins + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: ratio + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: bit_width + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - 
dynamic_type: double + name: result0 + type: double + - dynamic_type: double + name: result1 + type: double + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: to + operator_name: to + overload_name: dtype_layout + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to.dtype_layout(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional, c10::optional, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + kwarg_only: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: to + operator_name: to + overload_name: device + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to.device(Tensor self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Device + is_nullable: false + name: device + type: Device + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, Device, ScalarType, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Device + is_nullable: false + name: device + type: Device + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: to + operator_name: to + overload_name: dtype + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to.dtype(Tensor self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, ScalarType, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: dtype + type: ScalarType + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: to + operator_name: to + overload_name: other + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::to.other(Tensor self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: non_blocking + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: copy + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: MemoryFormat + is_nullable: true + name: memory_format + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: meshgrid + operator_name: meshgrid + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::meshgrid(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cartesian_prod + operator_name: cartesian_prod + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cartesian_prod(Tensor[] tensors) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: Tensor (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: combinations + operator_name: combinations + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::combinations(Tensor self, int r=2, bool with_replacement=False) -> Tensor + arguments: + - 
annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: r + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: with_replacement + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: int64_t + is_nullable: false + name: r + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: with_replacement + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: item + operator_name: item + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::item(Tensor self) -> Scalar + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Scalar (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Scalar + name: result + type: Scalar + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: result_type + operator_name: result_type + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::result_type.Tensor(Tensor tensor, Tensor other) -> ScalarType + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: ScalarType (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: ScalarType + name: result + type: ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: result_type + operator_name: result_type + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::result_type.Scalar(Tensor tensor, Scalar other) -> ScalarType + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: ScalarType (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false 
+ name: tensor + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: ScalarType + name: result + type: ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: result_type + operator_name: result_type + overload_name: Scalar_Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::result_type.Scalar_Tensor(Scalar scalar, Tensor tensor) -> ScalarType + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor + type: const Tensor & + schema_order_cpp_signature: ScalarType (Scalar, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: ScalarType + name: result + type: ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: result_type + operator_name: result_type + overload_name: Scalar_Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::result_type.Scalar_Scalar(Scalar scalar1, Scalar scalar2) -> ScalarType + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar1 + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar2 + type: Scalar + schema_order_cpp_signature: ScalarType (Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar1 + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar2 + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: ScalarType + name: result + type: ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: can_cast + operator_name: can_cast + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::can_cast(ScalarType from, ScalarType to) -> bool + arguments: + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: from + type: ScalarType + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: to + type: ScalarType + schema_order_cpp_signature: bool (ScalarType, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: from + type: ScalarType + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: to + type: ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: 
false + deprecated: false + has_math_kernel: false +- name: promote_types + operator_name: promote_types + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::promote_types(ScalarType type1, ScalarType type2) -> ScalarType + arguments: + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: type1 + type: ScalarType + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: type2 + type: ScalarType + schema_order_cpp_signature: ScalarType (ScalarType, ScalarType) + schema_order_arguments: + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: type1 + type: ScalarType + - annotation: null + dynamic_type: ScalarType + is_nullable: false + name: type2 + type: ScalarType + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: ScalarType + name: result + type: ScalarType + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _local_scalar_dense + operator_name: _local_scalar_dense + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_local_scalar_dense(Tensor self) -> Scalar + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Scalar (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Scalar + name: result + type: Scalar + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_fused_lstm_cell + operator_name: _thnn_fused_lstm_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? 
hidden_bias=None) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cx + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cx + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_fused_lstm_cell_backward + operator_name: _thnn_fused_lstm_cell_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_thnn_fused_lstm_cell_backward(Tensor? grad_hy, Tensor? 
grad_cy, Tensor cx, Tensor cy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: workspace + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + schema_order_cpp_signature: std::tuple (const c10::optional&, const c10::optional&, const Tensor &, const Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: workspace + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + - dynamic_type: Tensor + name: result4 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_differentiable_lstm_cell_backward + operator_name: _thnn_differentiable_lstm_cell_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_thnn_differentiable_lstm_cell_backward(Tensor? grad_hy, Tensor? grad_cy, Tensor input_gates, Tensor hidden_gates, Tensor? input_bias, Tensor? 
hidden_bias, Tensor cx, Tensor cy) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cy + type: const Tensor & + schema_order_cpp_signature: std::tuple (const c10::optional&, const c10::optional&, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_hy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: grad_cy + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: cy + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + - dynamic_type: Tensor + name: result4 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_fused_gru_cell + operator_name: _thnn_fused_gru_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? 
hidden_bias=None) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_fused_gru_cell_backward + operator_name: _thnn_fused_gru_cell_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_thnn_fused_gru_cell_backward(Tensor grad_hy, Tensor workspace, bool has_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_hy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: workspace + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_hy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: workspace + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_bias + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + - dynamic_type: Tensor + name: result4 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _thnn_differentiable_gru_cell_backward + operator_name: _thnn_differentiable_gru_cell_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::_thnn_differentiable_gru_cell_backward(Tensor grad_hy, Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias, Tensor? hidden_bias) -> (Tensor, Tensor, Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_hy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_hy + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hidden_gates + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: input_bias + type: const c10::optional& + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: hidden_bias + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + - dynamic_type: Tensor + name: result3 + type: Tensor + - dynamic_type: Tensor + name: result4 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lstm + operator_name: lstm + overload_name: input + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, TensorList, TensorList, bool, int64_t, 
double, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lstm + operator_name: lstm + overload_name: data + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, TensorList, TensorList, bool, int64_t, double, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + 
type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gru + operator_name: gru + overload_name: input + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gru + operator_name: gru + overload_name: data + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: 
Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rnn_tanh + operator_name: rnn_tanh + overload_name: input + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: 
std::tuple (const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rnn_tanh + operator_name: rnn_tanh + overload_name: data + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: 
false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rnn_relu + operator_name: rnn_relu + overload_name: input + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rnn_relu + operator_name: rnn_relu + overload_name: data + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: 
Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, TensorList, bool, int64_t, double, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: params + type: TensorList + - annotation: null + dynamic_type: bool + is_nullable: false + name: has_biases + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_layers + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: dropout + type: double + - annotation: null + dynamic_type: bool + is_nullable: false + name: train + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: bidirectional + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lstm_cell + operator_name: lstm_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + schema_order_cpp_signature: std::tuple (const Tensor &, TensorList, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gru_cell + operator_name: gru_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rnn_tanh_cell + operator_name: rnn_tanh_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rnn_relu_cell + operator_name: rnn_relu_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const c10::optional&, const c10::optional&) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_ih + type: const c10::optional& + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: b_hh + type: const c10::optional& + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_lstm_cell + operator_name: quantized_lstm_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null 
+ dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + schema_order_cpp_signature: std::tuple (const Tensor &, TensorList, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: hx + type: TensorList + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_gru_cell + operator_name: quantized_gru_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantized_gru_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: 
null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_rnn_relu_cell + operator_name: quantized_rnn_relu_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantized_rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + 
is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantized_rnn_tanh_cell + operator_name: quantized_rnn_tanh_cell + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantized_rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: 
input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: hx + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: w_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: b_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: packed_hh + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_ih + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: col_offsets_hh + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale_hh + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_ih + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: zero_point_hh + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _pack_padded_sequence + operator_name: _pack_padded_sequence + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + 
schema_string: aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: lengths + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: lengths + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _pack_padded_sequence_backward + operator_name: _pack_padded_sequence_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_pack_padded_sequence_backward(Tensor grad, int[] input_size, Tensor batch_sizes, bool batch_first) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _pad_packed_sequence + operator_name: _pad_packed_sequence + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: padding_value + type: Scalar + - 
annotation: null + dynamic_type: int64_t + is_nullable: false + name: total_length + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, bool, Scalar, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch_sizes + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: batch_first + type: bool + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: padding_value + type: Scalar + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: total_length + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: set_ + operator_name: set_ + overload_name: source_Storage + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::set_.source_Storage(Tensor(a!) self, Storage source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Storage + is_nullable: false + name: source + type: Storage + schema_order_cpp_signature: Tensor & (Tensor &, Storage) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Storage + is_nullable: false + name: source + type: Storage + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: set_ + operator_name: set_ + overload_name: source_Storage_storage_offset + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::set_.source_Storage_storage_offset(Tensor(a!) self, Storage source, int storage_offset, int[] size, int[] stride=[]) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Storage + is_nullable: false + name: source + type: Storage + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: storage_offset + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + schema_order_cpp_signature: Tensor & (Tensor &, Storage, int64_t, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Storage + is_nullable: false + name: source + type: Storage + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: storage_offset + type: int64_t + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + type: IntArrayRef + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: set_ + operator_name: set_ + overload_name: source_Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::set_.source_Tensor(Tensor(a!) self, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: set_ + operator_name: set_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::set_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: set_quantizer_ + operator_name: set_quantizer_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::set_quantizer_(Tensor(a!) self, ConstQuantizerPtr quantizer) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: ConstQuantizerPtr + is_nullable: false + name: quantizer + type: ConstQuantizerPtr + schema_order_cpp_signature: Tensor & (Tensor &, ConstQuantizerPtr) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: ConstQuantizerPtr + is_nullable: false + name: quantizer + type: ConstQuantizerPtr + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: is_set_to + operator_name: is_set_to + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::is_set_to(Tensor self, Tensor tensor) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_fill_ + operator_name: masked_fill_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_fill + operator_name: masked_fill + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_fill_ + operator_name: masked_fill_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_fill + operator_name: masked_fill + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_scatter_ + operator_name: masked_scatter_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: masked_scatter + operator_name: masked_scatter + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mask + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: view + operator_name: view + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::view(Tensor(a) self, int[] size) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: put_ + operator_name: put_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: accumulate + type: bool + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_add_ + operator_name: index_add_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_add + operator_name: index_add + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_add(Tensor self, int dim, Tensor index, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_add + operator_name: index_add + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + 
type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill_ + operator_name: index_fill_ + overload_name: int_Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill + operator_name: index_fill + overload_name: int_Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill_ + operator_name: index_fill_ + overload_name: int_Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill + operator_name: index_fill + overload_name: int_Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill_ + operator_name: index_fill_ + overload_name: Dimname_Scalar + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Dimname, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill_ + operator_name: index_fill_ + overload_name: Dimname_Tensor + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, Dimname, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill + operator_name: index_fill + overload_name: Dimname_Scalar + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: index_fill + operator_name: index_fill + overload_name: Dimname_Tensor + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: value + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + 
returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: src + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: src + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: value + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: value + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: dimname_src + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + 
type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter + operator_name: scatter + overload_name: dimname_value + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: reduce + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + - annotation: null + dynamic_type: std::string + is_nullable: false + kwarg_only: true + name: reduce + type: std::string + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &, std::string) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + - annotation: null + dynamic_type: std::string + is_nullable: false + kwarg_only: true + name: reduce + type: std::string + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_ + operator_name: scatter_ + overload_name: value_reduce + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + - annotation: null + dynamic_type: std::string + is_nullable: false + kwarg_only: true + name: reduce + type: std::string + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, Scalar, std::string) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + - annotation: null + dynamic_type: std::string + is_nullable: false + kwarg_only: true + name: reduce + type: std::string + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_add_ + operator_name: scatter_add_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_add + operator_name: scatter_add + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: scatter_add + operator_name: scatter_add + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: src + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor 
+ inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq_ + operator_name: eq_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq_ + operator_name: eq_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and_out + operator_name: bitwise_and + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and_out + operator_name: bitwise_and + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and + operator_name: bitwise_and + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and + operator_name: bitwise_and + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other 
+ type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and_ + operator_name: bitwise_and_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_and_ + operator_name: bitwise_and_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __and__ + operator_name: __and__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __and__ + operator_name: __and__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __iand__ + operator_name: __iand__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __iand__ + operator_name: __iand__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or_out + operator_name: bitwise_or + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or_out + operator_name: bitwise_or + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or + operator_name: bitwise_or + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or + operator_name: bitwise_or + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or_ + operator_name: bitwise_or_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_or_ + operator_name: bitwise_or_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __or__ + operator_name: __or__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __or__ + operator_name: __or__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: 
other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __ior__ + operator_name: __ior__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__ior__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __ior__ + operator_name: __ior__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__ior__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor_out + operator_name: bitwise_xor + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor_out + operator_name: bitwise_xor + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor + operator_name: bitwise_xor + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor + operator_name: bitwise_xor + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other 
+ type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor_ + operator_name: bitwise_xor_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bitwise_xor_ + operator_name: bitwise_xor_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __xor__ + operator_name: __xor__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __xor__ + operator_name: __xor__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __ixor__ + operator_name: __ixor__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __ixor__ + operator_name: __ixor__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __lshift__ + operator_name: __lshift__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __lshift__ + operator_name: __lshift__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + 
with_gil: false + deprecated: false + has_math_kernel: false +- name: __ilshift__ + operator_name: __ilshift__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __ilshift__ + operator_name: __ilshift__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __rshift__ + operator_name: __rshift__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __rshift__ + operator_name: __rshift__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + 
type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __irshift__ + operator_name: __irshift__ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: __irshift__ + operator_name: __irshift__ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lgamma_ + operator_name: lgamma_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lgamma_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan2_ + operator_name: atan2_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril_ + operator_name: tril_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triu_ + operator_name: triu_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: digamma_ + operator_name: digamma_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::digamma_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polygamma_ + operator_name: polygamma_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::polygamma_(Tensor(a!) self, int n) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + schema_order_cpp_signature: Tensor & (Tensor &, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: renorm_ + operator_name: renorm_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::renorm_(Tensor(a!) self, Scalar p, int dim, Scalar maxnorm) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: maxnorm + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar, int64_t, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: maxnorm + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_ + operator_name: pow_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: exponent + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: exponent + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_ + operator_name: pow_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp_ + operator_name: lerp_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp_ + operator_name: lerp_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod_ + operator_name: fmod_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod_ + operator_name: fmod_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_ + operator_name: remainder_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_ + operator_name: remainder_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addbmm_ + operator_name: addbmm_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addbmm_out + operator_name: addbmm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addbmm + operator_name: addbmm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: batch2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: beta + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcdiv_ + operator_name: addcdiv_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addcdiv_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: random_ + operator_name: random_ + overload_name: from + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: from + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: to + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, c10::optional, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: from + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: to + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: random_ + operator_name: random_ + overload_name: to + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: to + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: to + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: random_ + operator_name: random_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: uniform_ + operator_name: uniform_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: from + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: to + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: from + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: to + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cauchy_ + operator_name: cauchy_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: median + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: sigma + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: median + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: sigma + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_normal_ + operator_name: log_normal_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 2 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: exponential_ + operator_name: exponential_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: lambd + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: lambd + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: geometric_ + operator_name: geometric_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: p + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diag_out + operator_name: diag + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diag + operator_name: diag + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::diag(Tensor self, int diagonal=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: diag_backward + operator_name: diag_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::diag_backward(Tensor grad, int[] input_sizes, int diagonal) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null 
+ dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: cross_out + operator_name: cross + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cross + operator_name: cross + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cross(Tensor self, Tensor other, int? 
dim=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triu_out + operator_name: triu + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triu + operator_name: triu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::triu(Tensor self, int diagonal=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril_out + operator_name: tril + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril + operator_name: tril + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::tril(Tensor self, int diagonal=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: diagonal + type: int64_t + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: tril_indices + operator_name: tril_indices + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: long + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + 
has_math_kernel: false +- name: triu_indices + operator_name: triu_indices + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: at::kLong + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (int64_t, int64_t, int64_t, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: row + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: col + type: int64_t + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: offset + type: int64_t + - annotation: null + default: long + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: trace + operator_name: trace + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::trace(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: trace_backward + operator_name: trace_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::trace_backward(Tensor grad, int[] sizes) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - 
annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: sizes + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne_out + operator_name: ne + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne + operator_name: ne + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ne.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne_out + operator_name: ne + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne + operator_name: ne + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ne.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne_ + operator_name: ne_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ne_ + operator_name: ne_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: not_equal_out + operator_name: not_equal + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::not_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: not_equal + operator_name: not_equal + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::not_equal.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: not_equal_out + operator_name: not_equal + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::not_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: not_equal + operator_name: not_equal + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::not_equal.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: not_equal_ + operator_name: not_equal_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::not_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: not_equal_ + operator_name: not_equal_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::not_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq_out + operator_name: eq + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq + operator_name: eq + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eq.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq_out + operator_name: eq + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eq + operator_name: eq + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eq.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_out + operator_name: ge + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge + operator_name: ge + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ge.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_out + operator_name: ge + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge + operator_name: ge + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ge.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_ + operator_name: ge_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ge_ + operator_name: ge_ + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ge_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! 
[Elided for brevity: this hunk of the auto-generated ATen operator declarations adds the full YAML records (schema_string, argument and return types, dispatcher mode, and flags such as inplace, abstract and device_guard) for the comparison and indexing operators greater_equal / greater_equal_, le / le_, less_equal / less_equal_, gt / gt_, greater / greater_, lt / lt_ and less / less_ (each with Scalar, Tensor and *_out overloads), followed by take, take_backward, index_select (including the dimname overloads), index_select_backward, masked_select, masked_select_backward, nonzero, nonzero_numpy, gather and gather_backward. The declaration listing continues below.]
+ - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse_grad + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: sparse_grad + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: gather_out + operator_name: gather + overload_name: dimname_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, Dimname, const Tensor &, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: gather + operator_name: gather + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: sparse_grad + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _gather_sparse_backward + operator_name: _gather_sparse_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_gather_sparse_backward(Tensor self, int dim, Tensor index, Tensor grad) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcmul_out + operator_name: addcmul + 
overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcmul + operator_name: addcmul + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcmul_ + operator_name: addcmul_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + 
category_override: '' + matches_jit_signature: true + schema_string: aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcdiv_out + operator_name: addcdiv + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: addcdiv + operator_name: addcdiv + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor1 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: tensor2 + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: value + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lstsq_out + operator_name: lstsq + overload_name: X + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lstsq.X(Tensor self, Tensor A, *, Tensor(a!) X, Tensor(b!) qr) -> (Tensor(a!) solution, Tensor(b!) QR) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: solution + is_nullable: false + name: X + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: QR + is_nullable: false + name: qr + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: solution + is_nullable: false + name: X + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: QR + is_nullable: false + name: qr + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: solution + name: X + type: Tensor & + - dynamic_type: Tensor + field_name: QR + name: qr + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lstsq + operator_name: lstsq + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lstsq(Tensor self, Tensor A) -> (Tensor solution, Tensor QR) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: solution + name: solution + type: Tensor + - dynamic_type: Tensor + field_name: QR + name: QR + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triangular_solve_out + operator_name: triangular_solve + overload_name: X + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::triangular_solve.X(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False, *, Tensor(a!) X, Tensor(b!) M) -> (Tensor(a!) solution, Tensor(b!) cloned_coefficient) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: solution + is_nullable: false + name: X + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: cloned_coefficient + is_nullable: false + name: M + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: unitriangular + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, bool, bool, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: unitriangular + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: solution + is_nullable: false + name: X + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: cloned_coefficient + is_nullable: false + name: M + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: solution + name: X + type: Tensor & + - dynamic_type: Tensor + field_name: cloned_coefficient + name: M + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: triangular_solve + operator_name: triangular_solve + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::triangular_solve(Tensor self, Tensor A, bool upper=True, bool transpose=False, bool unitriangular=False) -> (Tensor solution, Tensor cloned_coefficient) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: unitriangular + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: 
unitriangular + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: solution + name: solution + type: Tensor + - dynamic_type: Tensor + field_name: cloned_coefficient + name: cloned_coefficient + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _triangular_solve_helper + operator_name: _triangular_solve_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_triangular_solve_helper(Tensor self, Tensor A, bool upper, bool transpose, bool unitriangular) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: unitriangular + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, bool, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: unitriangular + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: symeig_out + operator_name: symeig + overload_name: e + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::symeig.e(Tensor self, bool eigenvectors=False, bool upper=True, *, Tensor(a!) e, Tensor(b!) V) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: eigenvectors + is_nullable: false + name: V + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: eigenvectors + is_nullable: false + name: V + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: eigenvalues + name: e + type: Tensor & + - dynamic_type: Tensor + field_name: eigenvectors + name: V + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: symeig + operator_name: symeig + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::symeig(Tensor self, bool eigenvectors=False, bool upper=True) -> (Tensor eigenvalues, Tensor eigenvectors) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: eigenvalues + name: eigenvalues + type: Tensor + - dynamic_type: Tensor + field_name: eigenvectors + name: eigenvectors_return + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _symeig_helper + operator_name: _symeig_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_symeig_helper(Tensor self, bool eigenvectors, bool upper) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: 
null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eig_out + operator_name: eig + overload_name: e + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eig.e(Tensor self, bool eigenvectors=False, *, Tensor(a!) e, Tensor(b!) v) -> (Tensor(a!) eigenvalues, Tensor(b!) eigenvectors) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: eigenvectors + is_nullable: false + name: v + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: eigenvalues + is_nullable: false + name: e + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: eigenvectors + is_nullable: false + name: v + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: eigenvalues + name: e + type: Tensor & + - dynamic_type: Tensor + field_name: eigenvectors + name: v + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: eig + operator_name: eig + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::eig(Tensor self, bool eigenvectors=False) -> (Tensor eigenvalues, Tensor eigenvectors) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: eigenvectors + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: eigenvalues + name: eigenvalues + type: Tensor + - dynamic_type: Tensor + field_name: eigenvectors + name: eigenvectors_return + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: svd_out + operator_name: svd + overload_name: U + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::svd.U(Tensor self, bool some=True, bool compute_uv=True, *, Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) V) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: U + is_nullable: false + name: U + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: S + is_nullable: false + name: S + output: true + type: Tensor & + - allocate: true + annotation: c! + dynamic_type: Tensor + field_name: V + is_nullable: false + name: V + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool, Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: U + is_nullable: false + name: U + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: S + is_nullable: false + name: S + output: true + type: Tensor & + - allocate: true + annotation: c! + dynamic_type: Tensor + field_name: V + is_nullable: false + name: V + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: U + name: U + type: Tensor & + - dynamic_type: Tensor + field_name: S + name: S + type: Tensor & + - dynamic_type: Tensor + field_name: V + name: V + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: svd + operator_name: svd + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::svd(Tensor self, bool some=True, bool compute_uv=True) -> (Tensor U, Tensor S, Tensor V) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: U + name: U + type: Tensor + - dynamic_type: Tensor + field_name: S + name: S + type: Tensor + - dynamic_type: Tensor + field_name: V + name: V + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _svd_helper + operator_name: _svd_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_svd_helper(Tensor self, bool some, bool compute_uv) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: some + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: compute_uv + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_out + operator_name: cholesky + 
overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cholesky.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky + operator_name: cholesky + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cholesky(Tensor self, bool upper=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cholesky_helper + operator_name: _cholesky_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cholesky_helper(Tensor self, bool upper) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_solve_out + operator_name: cholesky_solve + overload_name: out + use_c10_dispatcher: 
with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cholesky_solve.out(Tensor self, Tensor input2, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_solve + operator_name: cholesky_solve + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cholesky_solve(Tensor self, Tensor input2, bool upper=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cholesky_solve_helper + operator_name: _cholesky_solve_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cholesky_solve_helper(Tensor self, Tensor A, bool upper) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool) + schema_order_arguments: + - 
annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: solve + operator_name: solve + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::solve(Tensor self, Tensor A) -> (Tensor solution, Tensor LU) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: solution + name: solution + type: Tensor + - dynamic_type: Tensor + field_name: LU + name: LU + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: solve_out + operator_name: solve + overload_name: solution + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::solve.solution(Tensor self, Tensor A, *, Tensor(a!) solution, Tensor(b!) lu) -> (Tensor(a!) solution, Tensor(b!) LU) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: solution + is_nullable: false + name: solution + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: LU + is_nullable: false + name: lu + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: solution + is_nullable: false + name: solution + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: LU + is_nullable: false + name: lu + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: solution + name: solution + type: Tensor & + - dynamic_type: Tensor + field_name: LU + name: lu + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _solve_helper + operator_name: _solve_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_solve_helper(Tensor self, Tensor A) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: A + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_inverse_out + operator_name: cholesky_inverse + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cholesky_inverse.out(Tensor self, bool upper=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: cholesky_inverse + operator_name: cholesky_inverse + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::cholesky_inverse(Tensor self, bool upper=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: upper + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: qr_out + operator_name: qr + overload_name: Q + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::qr.Q(Tensor self, bool some=True, *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: Q + is_nullable: false + name: Q + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: R + is_nullable: false + name: R + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: Q + is_nullable: false + name: Q + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: R + is_nullable: false + name: R + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: Q + name: Q + type: Tensor & + - dynamic_type: Tensor + field_name: R + name: R + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: qr + operator_name: qr + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::qr(Tensor self, bool some=True) -> (Tensor Q, Tensor R) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: some + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: Q + name: Q + type: Tensor + - dynamic_type: Tensor + field_name: R + name: R + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _qr_helper + operator_name: _qr_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_qr_helper(Tensor self, bool some) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: some + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: bool + is_nullable: false + name: some + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: geqrf_out + operator_name: geqrf + overload_name: a + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: a + is_nullable: false + name: a + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: tau + is_nullable: false + name: tau + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: a + is_nullable: false + name: a + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: tau + is_nullable: false + name: tau + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: a + name: a + type: Tensor & + - dynamic_type: Tensor + field_name: tau + name: tau + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: geqrf + operator_name: geqrf + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::geqrf(Tensor self) -> (Tensor a, Tensor tau) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: a + name: a + type: Tensor + - dynamic_type: Tensor + field_name: tau + name: tau + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: orgqr_out + operator_name: orgqr + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::orgqr.out(Tensor self, Tensor input2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: orgqr + operator_name: orgqr + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::orgqr(Tensor self, Tensor input2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ormqr_out + operator_name: ormqr + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ormqr.out(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input3 + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, bool, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input3 + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ormqr + operator_name: ormqr + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ormqr(Tensor self, Tensor input2, Tensor input3, bool left=True, bool transpose=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input3 + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input2 + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input3 + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: left + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: transpose + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _lu_with_info + operator_name: _lu_with_info + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_lu_with_info(Tensor self, bool pivot=True, bool check_errors=True) -> (Tensor, Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: pivot + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: check_errors + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: pivot + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: check_errors + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + - dynamic_type: Tensor + name: result2 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + 
deprecated: false + has_math_kernel: false +- name: lu_solve_out + operator_name: lu_solve + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lu_solve.out(Tensor self, Tensor LU_data, Tensor LU_pivots, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_pivots + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_pivots + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lu_solve + operator_name: lu_solve + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lu_solve(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_pivots + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_pivots + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _lu_solve_helper + operator_name: _lu_solve_helper + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_lu_solve_helper(Tensor self, Tensor LU_data, Tensor LU_pivots) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + 
name: LU_pivots + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_data + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: LU_pivots + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multinomial_out + operator_name: multinomial + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multinomial + operator_name: multinomial + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: replacement + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _multinomial_alias_setup + operator_name: _multinomial_alias_setup + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_multinomial_alias_setup(Tensor probs) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: probs + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: probs + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _multinomial_alias_draw + operator_name: _multinomial_alias_draw + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_multinomial_alias_draw(Tensor J, Tensor q, int num_samples, *, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: J + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: J + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: num_samples + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lgamma_out + operator_name: lgamma + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lgamma + operator_name: lgamma + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lgamma(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: digamma_out + operator_name: digamma + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::digamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: digamma + operator_name: digamma + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::digamma(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polygamma_out + operator_name: polygamma + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::polygamma.out(int n, Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (int64_t, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: polygamma + operator_name: polygamma + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::polygamma(int n, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: n + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfinv + operator_name: erfinv + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erfinv(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfinv_ + operator_name: erfinv_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erfinv_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: erfinv_out + operator_name: erfinv + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::erfinv.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: i0 + operator_name: i0 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::i0(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: i0_ + operator_name: i0_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::i0_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: i0_out + operator_name: i0 + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sign + operator_name: sign + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sign(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sign_ + operator_name: sign_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sign_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sign_out + operator_name: sign + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: signbit + operator_name: signbit + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::signbit(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: signbit_out + operator_name: signbit + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: dist + operator_name: dist + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 2 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan2_out + operator_name: atan2 + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: atan2 + operator_name: atan2 + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::atan2(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp_out + operator_name: lerp + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp_out + operator_name: lerp + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp + operator_name: lerp + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: weight + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: lerp + operator_name: lerp + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: end + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + 
python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: histc_out + operator_name: histc + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: histc + operator_name: histc + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 100 + dynamic_type: int64_t + is_nullable: false + name: bins + type: int64_t + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: min + type: Scalar + - annotation: null + default: 0 + dynamic_type: Scalar + is_nullable: false + name: max + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: 
false + has_math_kernel: false +- name: fmod_out + operator_name: fmod + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod + operator_name: fmod + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod_out + operator_name: fmod + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fmod + operator_name: fmod + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hypot_out + operator_name: hypot + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hypot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hypot + operator_name: hypot + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hypot(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hypot_ + operator_name: hypot_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hypot_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nextafter_out + operator_name: nextafter + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nextafter.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nextafter + operator_name: nextafter + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nextafter(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nextafter_ + operator_name: nextafter_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nextafter_(Tensor(a!) self, Tensor other) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_out + operator_name: remainder + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder + operator_name: remainder + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: other + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder_out + operator_name: remainder + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: remainder + operator_name: remainder + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min + operator_name: min + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::min(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max + operator_name: max + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: maximum + operator_name: maximum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::maximum(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: 
Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: maximum_out + operator_name: maximum + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max + operator_name: max + overload_name: other + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max.other(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_out + operator_name: max + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: minimum + operator_name: minimum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::minimum(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: minimum_out + operator_name: minimum + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min_out + operator_name: min + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: min + operator_name: min + overload_name: other + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::min.other(Tensor self, Tensor other) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: median + operator_name: median + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::median(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + 
deprecated: false + has_math_kernel: false +- name: quantile_out + operator_name: quantile + overload_name: scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, double, c10::optional, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantile + operator_name: quantile + overload_name: scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantile.scalar(Tensor self, float q, int? 
dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, double, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantile_out + operator_name: quantile + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, c10::optional, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: quantile + operator_name: quantile + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::quantile(Tensor self, Tensor q, int? 
dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nanquantile_out + operator_name: nanquantile + overload_name: scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nanquantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, double, c10::optional, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nanquantile + operator_name: nanquantile + overload_name: scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nanquantile.scalar(Tensor self, float q, int? 
dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, double, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: q + type: double + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nanquantile_out + operator_name: nanquantile + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nanquantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, c10::optional, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nanquantile + operator_name: nanquantile + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nanquantile(Tensor self, Tensor q, int? 
dim=None, bool keepdim=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, c10::optional, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: q + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: dim + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sort_out + operator_name: sort + overload_name: values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sort + operator_name: sort + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sort_out + operator_name: sort + overload_name: dimname_values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: sort + operator_name: sort + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: argsort + operator_name: argsort + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: argsort + operator_name: argsort + overload_name: dimname + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::argsort.dimname(Tensor self, Dimname dim, bool 
descending=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, Dimname, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Dimname + is_nullable: false + name: dim + type: Dimname + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: descending + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: topk_out + operator_name: topk + overload_name: values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::topk.values(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, int64_t, bool, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + field_name: values + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + field_name: indices + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor & + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: topk + operator_name: topk + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::topk(Tensor self, int k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, int64_t, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: k + type: int64_t + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: largest + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: sorted + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + field_name: values + name: values + type: Tensor + - dynamic_type: Tensor + field_name: indices + name: indices + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: all + operator_name: all + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::all(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: any + operator_name: any + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::any(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + 
schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: renorm_out + operator_name: renorm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: maxnorm + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, int64_t, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: maxnorm + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: renorm + operator_name: renorm + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: maxnorm + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, int64_t, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: maxnorm + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: unfold + operator_name: unfold + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dimension + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: unfold_backward + operator_name: unfold_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::unfold_backward(Tensor grad_in, int[] input_sizes, int dim, int size, int step) -> 
Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_in + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_in + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_sizes + type: IntArrayRef + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: step + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: equal + operator_name: equal + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::equal(Tensor self, Tensor other) -> bool + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + schema_order_cpp_signature: bool (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: bool + name: result + type: bool + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_out + operator_name: pow + overload_name: Tensor_Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: exponent + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: exponent + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_out + operator_name: pow + overload_name: Tensor_Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow + operator_name: pow + overload_name: Tensor_Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: exponent + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: exponent + type: Scalar + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow + operator_name: pow + overload_name: Tensor_Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: 
false + name: exponent + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow_out + operator_name: pow + overload_name: Scalar_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + schema_order_cpp_signature: Tensor & (Scalar, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: pow + operator_name: pow + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + schema_order_cpp_signature: Tensor (Scalar, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: exponent + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_ + operator_name: normal_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, double, double, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0 + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - Tensor + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_out + operator_name: normal + overload_name: Tensor_float_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, double, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal + operator_name: normal + overload_name: Tensor_float + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, double, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_out + operator_name: normal + overload_name: float_Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (double, const Tensor &, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal + operator_name: normal + overload_name: float_Tensor + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.float_Tensor(float mean, Tensor std, *, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (double, const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_out + operator_name: normal + overload_name: Tensor_Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal + operator_name: normal + overload_name: Tensor_Tensor + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: mean + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: std + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal + operator_name: normal + overload_name: float_float + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.float_float(float mean, float std, int[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor + arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: '{}' + dynamic_type: TensorOptions + is_nullable: false + kwarg_only: true + name: options + type: const TensorOptions & + schema_order_cpp_signature: Tensor (double, double, IntArrayRef, c10::optional, c10::optional, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Layout + is_nullable: true + kwarg_only: true + name: layout + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: Device + is_nullable: true + kwarg_only: true + name: device + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: bool + is_nullable: true + kwarg_only: true + name: pin_memory + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: true + abstract: false + 
device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: normal_out + operator_name: normal + overload_name: float_float_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::normal.float_float_out(float mean, float std, int[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (double, double, IntArrayRef, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: double + is_nullable: false + name: mean + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: std + type: double + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: size + type: IntArrayRef + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + kwarg_only: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: alias + operator_name: alias + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::alias(Tensor(a) self) -> Tensor(a) + arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: a + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _index_copy_ + operator_name: _index_copy_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + schema_order_cpp_signature: Tensor & (Tensor &, int64_t, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: index + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: source + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cumsum + operator_name: _cumsum + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cumsum(Tensor self, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cumsum_out + operator_name: _cumsum + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cumsum.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cumprod + operator_name: _cumprod + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cumprod(Tensor self, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cumprod_out + operator_name: _cumprod + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cumprod.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _var + operator_name: _var + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_var(Tensor self, bool unbiased=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _std + operator_name: _std + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_std(Tensor self, bool unbiased=True) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: unbiased + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _amp_non_finite_check_and_unscale_ + operator_name: _amp_non_finite_check_and_unscale_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_amp_non_finite_check_and_unscale_(Tensor(a!) self, Tensor(b!) found_inf, Tensor inv_scale) -> () + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: b! + dynamic_type: Tensor + is_nullable: false + name: found_inf + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: inv_scale + type: const Tensor & + schema_order_cpp_signature: void (Tensor &, Tensor &, const Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: found_inf + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: inv_scale + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _amp_update_scale + operator_name: _amp_update_scale + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_amp_update_scale(Tensor(a!) growth_tracker, Tensor current_scale, Tensor found_inf, float scale_growth_factor, float scale_backoff_factor, int growth_interval) -> Tensor + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: growth_tracker + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: current_scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: found_inf + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_growth_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_backoff_factor + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: growth_interval + type: int64_t + schema_order_cpp_signature: Tensor (Tensor &, const Tensor &, const Tensor &, double, double, int64_t) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: growth_tracker + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: current_scale + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: found_inf + type: const Tensor & + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_growth_factor + type: double + - annotation: null + dynamic_type: double + is_nullable: false + name: scale_backoff_factor + type: double + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: growth_interval + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _cat + operator_name: _cat + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cat(Tensor[] tensors, int dim=0) -> Tensor + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (TensorList, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: 
false +- name: _cat_out + operator_name: _cat + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (TensorList, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + default: 0 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add + operator_name: _foreach_add + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_add.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_add_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: void (TensorList, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sub.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: void (TensorList, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul + operator_name: _foreach_mul + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_mul.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul_ + operator_name: _foreach_mul_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_mul_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: void (TensorList, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div + operator_name: _foreach_div + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_div.Scalar(Tensor[] tensors, Scalar scalar) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_div_.Scalar(Tensor(a!)[] self, Scalar scalar) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + schema_order_cpp_signature: void (TensorList, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scalar + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add + operator_name: _foreach_add + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_add.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add_ + operator_name: _foreach_add_ + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_add_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: void (TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub + operator_name: _foreach_sub + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sub.List(Tensor[] tensors1, Tensor[] tensors2, *, Scalar alpha=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub_ + operator_name: _foreach_sub_ + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + schema_order_cpp_signature: void (TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + kwarg_only: true + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul + operator_name: _foreach_mul + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_mul.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + schema_order_cpp_signature: std::vector (TensorList, TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul_ + operator_name: _foreach_mul_ + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_mul_.List(Tensor(a!)[] self, Tensor[] other) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + schema_order_cpp_signature: void (TensorList, TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div + operator_name: _foreach_div + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_div.List(Tensor[] tensors1, Tensor[] tensors2) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + schema_order_cpp_signature: std::vector (TensorList, TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors2 + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div_ + operator_name: _foreach_div_ + overload_name: List + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_div_.List(Tensor(a!)[] self, Tensor[] other) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + schema_order_cpp_signature: void (TensorList, TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: other + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add_scalar_list + operator_name: _foreach_add_scalar_list + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_add_scalar_list(Tensor[] tensors, float[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: std::vector (TensorList, ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_add_scalar_list_ + operator_name: _foreach_add_scalar_list_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_add_scalar_list_(Tensor(a!)[] self, float[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: void (TensorList, ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub_scalar_list + operator_name: _foreach_sub_scalar_list + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sub_scalar_list(Tensor[] tensors, float[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: std::vector (TensorList, ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sub_scalar_list_ + operator_name: _foreach_sub_scalar_list_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sub_scalar_list_(Tensor(a!)[] self, float[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: void (TensorList, ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div_scalar_list + operator_name: _foreach_div_scalar_list + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_div_scalar_list(Tensor[] tensors, float[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: std::vector (TensorList, ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_div_scalar_list_ + operator_name: _foreach_div_scalar_list_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_div_scalar_list_(Tensor(a!)[] self, float[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: void (TensorList, ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul_scalar_list + operator_name: _foreach_mul_scalar_list + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_mul_scalar_list(Tensor[] tensors, float[] scalars) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: std::vector (TensorList, ArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_mul_scalar_list_ + operator_name: _foreach_mul_scalar_list_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_mul_scalar_list_(Tensor(a!)[] self, float[] scalars) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + schema_order_cpp_signature: void (TensorList, ArrayRef) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: ArrayRef + is_nullable: false + name: scalars + type: ArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_exp + operator_name: _foreach_exp + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_exp(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_exp_ + operator_name: _foreach_exp_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_exp_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + schema_order_cpp_signature: void (TensorList) + schema_order_arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt + operator_name: _foreach_sqrt + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sqrt(Tensor[] tensors) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + schema_order_cpp_signature: std::vector (TensorList) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensors + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_sqrt_ + operator_name: _foreach_sqrt_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_sqrt_(Tensor(a!)[] self) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + schema_order_cpp_signature: void (TensorList) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcdiv_ + operator_name: _foreach_addcdiv_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_addcdiv_(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: void (TensorList, TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcmul_ + operator_name: _foreach_addcmul_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_addcmul_(Tensor(a!)[] self, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> () + arguments: + - annotation: a! + dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: void (TensorList, TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: TensorList + is_nullable: false + name: self + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: [] + inplace: true + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcdiv + operator_name: _foreach_addcdiv + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_addcdiv(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: input + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: input + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _foreach_addcmul + operator_name: _foreach_addcmul + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_foreach_addcmul(Tensor[] input, Tensor[] tensor1, Tensor[] tensor2, Scalar value=1) -> Tensor[] + arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: input + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + schema_order_cpp_signature: std::vector (TensorList, TensorList, TensorList, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: input + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor1 + type: TensorList + - annotation: null + dynamic_type: TensorList + is_nullable: false + name: tensor2 + type: TensorList + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: value + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + 
- dynamic_type: TensorList + name: result + type: std::vector + inplace: false + is_factory_method: false + abstract: true + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mode + operator_name: _mode + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_mode(Tensor self, int dim=-1, bool keepdim=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _mode_out + operator_name: _mode + overload_name: values + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_mode.values(Tensor self, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, int64_t, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: values + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: values + type: Tensor & + - dynamic_type: Tensor + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize + operator_name: bucketize + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: boundaries + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: boundaries + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize_out + operator_name: bucketize + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: boundaries + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, bool, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: boundaries + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: bucketize + operator_name: bucketize + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: boundaries + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: Tensor (Scalar, const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: boundaries + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted + operator_name: searchsorted + overload_name: Tensor + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + 
is_nullable: false + name: sorted_sequence + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sorted_sequence + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted_out + operator_name: searchsorted + overload_name: Tensor_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sorted_sequence + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, bool, bool, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sorted_sequence + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: searchsorted + operator_name: searchsorted + overload_name: Scalar + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sorted_sequence + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: sorted_sequence + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: self + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: out_int32 + type: bool + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + kwarg_only: true + name: right + type: bool + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss_out + operator_name: mse_loss + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss + operator_name: mse_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss_backward_out + operator_name: mse_loss_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mse_loss_backward + operator_name: mse_loss_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss_out + operator_name: l1_loss + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::l1_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss + operator_name: l1_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss_backward_out + operator_name: l1_loss_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: l1_loss_backward + operator_name: l1_loss_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss_out + operator_name: multi_margin_loss + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multi_margin_loss.out(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss + operator_name: multi_margin_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multi_margin_loss(Tensor self, Tensor target, Scalar p=1, Scalar margin=1, Tensor? 
weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar, Scalar, const c10::optional&, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss_backward_out + operator_name: multi_margin_loss_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multi_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multi_margin_loss_backward + operator_name: multi_margin_loss_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multi_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, Scalar p, Scalar margin, Tensor? 
weight=None, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, const c10::optional&, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: p + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: margin + type: Scalar + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_out + operator_name: multilabel_margin_loss + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multilabel_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss + operator_name: multilabel_margin_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multilabel_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_forward_out + operator_name: multilabel_margin_loss_forward + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multilabel_margin_loss_forward.output(Tensor self, Tensor target, int reduction, *, Tensor(a!) output, Tensor(b!) is_target) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: is_target + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, int64_t, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: is_target + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: is_target + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_forward + operator_name: multilabel_margin_loss_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multilabel_margin_loss_forward(Tensor self, Tensor target, int reduction) -> (Tensor output, Tensor is_target) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: output + name: output + type: Tensor + - dynamic_type: Tensor + field_name: is_target + name: is_target + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_backward_out + operator_name: multilabel_margin_loss_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: is_target + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: is_target + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: multilabel_margin_loss_backward + operator_name: multilabel_margin_loss_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: is_target + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: is_target + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: 
nll_loss_out + operator_name: nll_loss + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss + operator_name: nll_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_forward_out + operator_name: nll_loss_forward + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: total_weight + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: total_weight + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: total_weight + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_forward + operator_name: nll_loss_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss_forward(Tensor self, Tensor target, Tensor? 
weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: output + name: output + type: Tensor + - dynamic_type: Tensor + field_name: total_weight + name: total_weight + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_backward_out + operator_name: nll_loss_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss_backward + operator_name: nll_loss_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss2d_out + operator_name: nll_loss2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss2d.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, int ignore_index=-100, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss2d + operator_name: nll_loss2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss2d(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, int ignore_index=-100) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: -100 + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss2d_forward_out + operator_name: nll_loss2d_forward + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss2d_forward.output(Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, *, Tensor(a!) output, Tensor(b!) total_weight) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: total_weight + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: total_weight + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: total_weight + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss2d_forward + operator_name: nll_loss2d_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss2d_forward(Tensor self, Tensor target, Tensor? 
weight, int reduction, int ignore_index) -> (Tensor output, Tensor total_weight) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: output + name: output + type: Tensor + - dynamic_type: Tensor + field_name: total_weight + name: total_weight + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss2d_backward_out + operator_name: nll_loss2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight, int reduction, int ignore_index, Tensor total_weight, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, const Tensor &, int64_t, int64_t, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: nll_loss2d_backward + operator_name: nll_loss2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::nll_loss2d_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? 
weight, int reduction, int ignore_index, Tensor total_weight) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, const c10::optional&, int64_t, int64_t, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: weight + type: const c10::optional& + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: ignore_index + type: int64_t + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: total_weight + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: smooth_l1_loss_out + operator_name: smooth_l1_loss + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: beta + type: double + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, int64_t, double, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: beta + type: double + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: smooth_l1_loss + operator_name: smooth_l1_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: beta + type: double + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + default: 1.0 + dynamic_type: double + is_nullable: false + name: beta + type: double + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: smooth_l1_loss_backward_out + operator_name: smooth_l1_loss_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, double, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: smooth_l1_loss_backward + operator_name: smooth_l1_loss_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t, double) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - annotation: null + dynamic_type: double + is_nullable: false + name: beta + type: double + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: soft_margin_loss_out + operator_name: soft_margin_loss + overload_name: out + use_c10_dispatcher: 
with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::soft_margin_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: soft_margin_loss + operator_name: soft_margin_loss + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::soft_margin_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + default: at::Reduction::Mean + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: soft_margin_loss_backward_out + operator_name: soft_margin_loss_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::soft_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: soft_margin_loss_backward + operator_name: soft_margin_loss_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::soft_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: target + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: reduction + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: elu_out + operator_name: elu + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: elu + operator_name: elu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: elu_backward_out + operator_name: elu_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::elu_backward.grad_input(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Scalar, Scalar, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: elu_backward + operator_name: elu_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::elu_backward(Tensor grad_output, Scalar alpha, Scalar scale, Scalar input_scale, Tensor output) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar, Scalar, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: elu_ + operator_name: elu_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + 
schema_string: aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar, Scalar, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: scale + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: input_scale + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: glu_out + operator_name: glu + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: glu + operator_name: glu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::glu(Tensor self, int dim=-1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: glu_backward_out + operator_name: glu_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::glu_backward.grad_input(Tensor grad_output, Tensor self, int dim, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, int64_t, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: glu_backward + operator_name: glu_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::glu_backward(Tensor grad_output, Tensor self, int dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardsigmoid_out + operator_name: hardsigmoid + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardsigmoid + operator_name: hardsigmoid + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardsigmoid(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardsigmoid_ + operator_name: hardsigmoid_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardsigmoid_backward + operator_name: hardsigmoid_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardsigmoid_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardtanh_out + operator_name: hardtanh + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardtanh + operator_name: hardtanh + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: -1 + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardtanh_backward_out + operator_name: hardtanh_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardtanh_backward + operator_name: hardtanh_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardtanh_ + operator_name: hardtanh_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: -1 + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: -1 + dynamic_type: Scalar + is_nullable: false + name: min_val + type: Scalar + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: max_val + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish_out + operator_name: hardswish + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish + operator_name: hardswish + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardswish(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish_ + operator_name: hardswish_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardswish_(Tensor(a!) self) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + schema_order_cpp_signature: Tensor & (Tensor &) + schema_order_arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: hardswish_backward + operator_name: hardswish_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu_out + operator_name: leaky_relu + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.01 + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.01 + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu + operator_name: leaky_relu + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.01 + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.01 + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu_backward + operator_name: leaky_relu_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: leaky_relu_ + operator_name: leaky_relu_ + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!) + arguments: + - annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0.01 + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + schema_order_cpp_signature: Tensor & (Tensor &, Scalar) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + default: 0.01 + dynamic_type: Scalar + is_nullable: false + name: negative_slope + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid_out + operator_name: log_sigmoid + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid + operator_name: log_sigmoid + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_sigmoid(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid_forward_out + operator_name: log_sigmoid_forward + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: buffer + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: buffer + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: buffer + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid_forward + operator_name: log_sigmoid_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: output + name: output + type: Tensor + - dynamic_type: Tensor + field_name: buffer + name: buffer + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid_backward_out + operator_name: log_sigmoid_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: buffer + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: buffer + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: log_sigmoid_backward + operator_name: log_sigmoid_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: buffer + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: buffer + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rrelu_with_noise_out + operator_name: rrelu_with_noise + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rrelu_with_noise.out(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + default: 0.125 + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + default: 0.3333333333333333 + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Scalar, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + default: 0.125 + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + default: 0.3333333333333333 + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rrelu_with_noise + operator_name: rrelu_with_noise + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rrelu_with_noise(Tensor self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? 
generator=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + default: 0.125 + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + default: 0.3333333333333333 + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar, Scalar, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + default: 0.125 + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + default: 0.3333333333333333 + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rrelu_with_noise_backward + operator_name: rrelu_with_noise_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rrelu_with_noise_backward(Tensor grad_output, Tensor self, Tensor noise, Scalar lower, Scalar upper, bool training, bool self_is_result) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, Scalar, Scalar, bool, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - 
annotation: null + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: self_is_result + type: bool + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: rrelu_with_noise_ + operator_name: rrelu_with_noise_ + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::rrelu_with_noise_(Tensor(a!) self, Tensor noise, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!) + arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + default: 0.125 + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + default: 0.3333333333333333 + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + schema_order_cpp_signature: Tensor & (Tensor &, const Tensor &, Scalar, Scalar, bool, c10::optional) + schema_order_arguments: + - annotation: a! + dynamic_type: Tensor + is_nullable: false + name: self + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: noise + type: const Tensor & + - annotation: null + default: 0.125 + dynamic_type: Scalar + is_nullable: false + name: lower + type: Scalar + - annotation: null + default: 0.3333333333333333 + dynamic_type: Scalar + is_nullable: false + name: upper + type: Scalar + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: training + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: Generator + is_nullable: true + name: generator + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: self + type: Tensor & + inplace: true + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softplus_out + operator_name: softplus + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + default: 20 + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + default: 20 + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softplus + operator_name: softplus + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + default: 20 + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + default: 20 + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softplus_backward_out + operator_name: softplus_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softplus_backward.grad_input(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softplus_backward + operator_name: softplus_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softplus_backward(Tensor grad_output, Tensor self, Scalar beta, Scalar threshold, Tensor output) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar, Scalar, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: beta + type: Scalar + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: threshold + type: Scalar + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: output + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink_out + operator_name: softshrink + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + 
manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.5 + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.5 + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink + operator_name: softshrink + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.5 + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: 0.5 + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink_backward_out + operator_name: softshrink_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softshrink_backward.grad_input(Tensor grad_output, Tensor self, Scalar lambd, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Scalar, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: softshrink_backward + operator_name: softshrink_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::softshrink_backward(Tensor grad_output, Tensor self, Scalar lambd) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Scalar + is_nullable: false + name: lambd + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool2d_out + operator_name: adaptive_avg_pool2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_avg_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool2d + operator_name: adaptive_avg_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: mkldnn_adaptive_avg_pool2d + operator_name: mkldnn_adaptive_avg_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::mkldnn_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _adaptive_avg_pool2d + operator_name: _adaptive_avg_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::_adaptive_avg_pool2d(Tensor self, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _adaptive_avg_pool2d_backward + operator_name: _adaptive_avg_pool2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_adaptive_avg_pool2d_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool3d_out + operator_name: adaptive_avg_pool3d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_avg_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool3d + operator_name: adaptive_avg_pool3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_avg_pool3d(Tensor self, int[3] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool3d_backward_out + operator_name: adaptive_avg_pool3d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_avg_pool3d_backward + operator_name: adaptive_avg_pool3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_avg_pool3d_backward(Tensor grad_output, Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool2d_out + operator_name: adaptive_max_pool2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + - dynamic_type: Tensor + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool2d + operator_name: adaptive_max_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool2d_backward_out + operator_name: adaptive_max_pool2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool2d_backward + operator_name: adaptive_max_pool2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool2d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool3d_out + operator_name: adaptive_max_pool3d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + - dynamic_type: Tensor + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool3d + operator_name: adaptive_max_pool3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool3d_backward_out + operator_name: adaptive_max_pool3d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: adaptive_max_pool3d_backward + operator_name: adaptive_max_pool3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::adaptive_max_pool3d_backward(Tensor grad_output, Tensor self, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool2d_out + operator_name: avg_pool2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool2d + operator_name: avg_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool2d_backward_out + operator_name: avg_pool2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool2d_backward + operator_name: avg_pool2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? 
divisor_override) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool3d_out + operator_name: avg_pool3d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool3d + operator_name: avg_pool3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + default: true + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool3d_backward_out + operator_name: avg_pool3d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: avg_pool3d_backward + operator_name: avg_pool3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::avg_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? 
divisor_override) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, bool, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: bool + is_nullable: false + name: count_include_pad + type: bool + - annotation: null + dynamic_type: int64_t + is_nullable: true + name: divisor_override + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool2d_out + operator_name: fractional_max_pool2d + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool2d.output(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, const Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool2d + operator_name: fractional_max_pool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool2d(Tensor self, int[2] kernel_size, int[2] output_size, Tensor random_samples) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool2d_backward_out + operator_name: 
fractional_max_pool2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool2d_backward + operator_name: fractional_max_pool2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] output_size, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool3d_out + operator_name: fractional_max_pool3d + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool3d.output(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples, *, Tensor(a!) output, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, const Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool3d + operator_name: fractional_max_pool3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool3d(Tensor self, int[3] kernel_size, int[3] output_size, Tensor random_samples) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: random_samples + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool3d_backward_out + operator_name: 
fractional_max_pool3d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fractional_max_pool3d_backward + operator_name: fractional_max_pool3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fractional_max_pool3d_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] output_size, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices_out + operator_name: max_pool2d_with_indices + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + - dynamic_type: Tensor + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices + operator_name: max_pool2d_with_indices + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices_backward_out + operator_name: max_pool2d_with_indices_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool2d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool2d_with_indices_backward + operator_name: max_pool2d_with_indices_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool2d_with_indices_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool ceil_mode, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices_out + operator_name: max_pool3d_with_indices + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: false + name: indices + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + - dynamic_type: Tensor + name: indices + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices + operator_name: max_pool3d_with_indices + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + schema_order_cpp_signature: std::tuple (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result0 + type: Tensor + - dynamic_type: Tensor + name: result1 + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices_backward_out + operator_name: max_pool3d_with_indices_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_pool3d_with_indices_backward + operator_name: max_pool3d_with_indices_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, bool, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: ceil_mode + type: bool + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d_out + operator_name: max_unpool2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool2d.out(Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d + operator_name: max_unpool2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool2d(Tensor self, Tensor indices, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d_backward_out + operator_name: max_unpool2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool2d_backward + operator_name: max_unpool2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool2d_backward(Tensor grad_output, Tensor self, Tensor indices, int[2] output_size) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d_out + operator_name: max_unpool3d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool3d.out(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!) 
+ arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d + operator_name: max_unpool3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool3d(Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: 
false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d_backward_out + operator_name: max_unpool3d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: max_unpool3d_backward + operator_name: max_unpool3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::max_unpool3d_backward(Tensor grad_output, Tensor self, Tensor indices, int[3] output_size, int[3] stride, int[3] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: indices + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d_out + operator_name: reflection_pad1d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d + operator_name: reflection_pad1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad1d(Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d_backward_out + operator_name: reflection_pad1d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad1d_backward + operator_name: reflection_pad1d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d_out + operator_name: reflection_pad2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d + operator_name: reflection_pad2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad2d(Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d_backward_out + operator_name: reflection_pad2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: reflection_pad2d_backward + operator_name: reflection_pad2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::reflection_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d_out + operator_name: replication_pad1d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad1d.out(Tensor self, int[2] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d + operator_name: replication_pad1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad1d(Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d_backward_out + operator_name: replication_pad1d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad1d_backward.grad_input(Tensor grad_output, Tensor self, int[2] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad1d_backward + operator_name: replication_pad1d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad1d_backward(Tensor grad_output, Tensor self, int[2] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d_out + operator_name: replication_pad2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad2d.out(Tensor self, int[4] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d + operator_name: replication_pad2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad2d(Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d_backward_out + operator_name: replication_pad2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad2d_backward.grad_input(Tensor grad_output, Tensor self, int[4] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad2d_backward + operator_name: replication_pad2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad2d_backward(Tensor grad_output, Tensor self, int[4] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 4 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d_out + operator_name: replication_pad3d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad3d.out(Tensor self, int[6] padding, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d + operator_name: replication_pad3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad3d(Tensor self, int[6] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d_backward_out + operator_name: replication_pad3d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad3d_backward.grad_input(Tensor grad_output, Tensor self, int[6] padding, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: replication_pad3d_backward + operator_name: replication_pad3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::replication_pad3d_backward(Tensor grad_output, Tensor self, int[6] padding) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 6 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d + operator_name: upsample_linear1d + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d_backward + operator_name: upsample_linear1d_backward + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_linear1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d + operator_name: upsample_bilinear2d + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bilinear2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_backward + operator_name: upsample_bilinear2d_backward + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bilinear2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d + operator_name: upsample_trilinear3d + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_trilinear3d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d_backward + operator_name: upsample_trilinear3d_backward + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_trilinear3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d + operator_name: upsample_bicubic2d + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bicubic2d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_backward + operator_name: upsample_bicubic2d_backward + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bicubic2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, bool align_corners, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, bool, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d + operator_name: upsample_nearest1d + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_nearest1d.vec(Tensor input, int[]? output_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest1d_backward + operator_name: upsample_nearest1d_backward + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_nearest1d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d + operator_name: upsample_nearest2d + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_nearest2d.vec(Tensor input, int[]? output_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest2d_backward + operator_name: upsample_nearest2d_backward + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_nearest2d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d + operator_name: upsample_nearest3d + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_nearest3d.vec(Tensor input, int[]? output_size, float[]? 
scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: input + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_nearest3d_backward + operator_name: upsample_nearest3d_backward + overload_name: vec + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_nearest3d_backward.vec(Tensor grad_output, int[]? output_size, int[] input_size, float[]? scale_factors) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, IntArrayRef, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: output_size + type: c10::optional + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + type: IntArrayRef + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: scale_factors + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d_out + operator_name: upsample_linear1d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_linear1d.out(Tensor self, int[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
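As an illustrative aside (not part of the generated file): the `.vec` overloads listed above take a nullable `int[]? output_size` together with a nullable `float[]? scale_factors`, which the declarations render as `c10::optional` arguments in the C++ signatures. A minimal sketch of calling one of them, assuming Libtorch v1.7.0 headers and that the optional template parameters follow the upstream ATen headers (`c10::optional<IntArrayRef>`, `c10::optional<ArrayRef<double>>`):

```cpp
// Sketch only: the upsample_nearest2d.vec overload declared above, driven by
// scale_factors while output_size is left as None.
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::Tensor x = torch::randn({1, 3, 8, 8});

  // Exactly one of output_size / scale_factors is supplied; the other stays empty.
  std::vector<double> scale_factors{2.0, 2.0};
  torch::Tensor up = at::upsample_nearest2d(
      x,
      /*output_size=*/c10::nullopt,
      /*scale_factors=*/c10::ArrayRef<double>(scale_factors));

  std::cout << up.sizes() << std::endl;  // expect [1, 3, 16, 16]
  return 0;
}
```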
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d + operator_name: upsample_linear1d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_linear1d(Tensor self, int[1] output_size, bool align_corners, float? scales=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d_backward_out + operator_name: upsample_linear1d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_linear1d_backward.grad_input(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? 
scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_linear1d_backward + operator_name: upsample_linear1d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_linear1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, bool align_corners, float? 
scales=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 1 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_out + operator_name: upsample_bilinear2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bilinear2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, c10::optional, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d + operator_name: upsample_bilinear2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bilinear2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_backward_out + operator_name: upsample_bilinear2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, bool, c10::optional, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bilinear2d_backward + operator_name: upsample_bilinear2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bilinear2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_out + operator_name: upsample_bicubic2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bicubic2d.out(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
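As an illustrative aside (not part of the generated file): the fixed-size `upsample_bilinear2d` entry above takes `int[2] output_size` plus nullable per-axis scales (`float? scales_h`, `float? scales_w`), i.e. `c10::optional<double>` parameters on the C++ side. A minimal sketch, assuming Libtorch v1.7.0 headers; the scales are left as None so the output size alone drives the resize:

```cpp
// Sketch only: aten::upsample_bilinear2d(Tensor, int[2], bool, float?, float?)
// as declared above, with both optional scales left unset.
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::Tensor x = torch::randn({1, 3, 8, 8});

  // int[2] output_size = {H_out, W_out}.
  torch::Tensor up = at::upsample_bilinear2d(
      x, {16, 16}, /*align_corners=*/false,
      /*scales_h=*/c10::nullopt, /*scales_w=*/c10::nullopt);

  std::cout << up.sizes() << std::endl;  // expect [1, 3, 16, 16]
  return 0;
}
```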
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, bool, c10::optional, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d + operator_name: upsample_bicubic2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bicubic2d(Tensor self, int[2] output_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_backward_out + operator_name: upsample_bicubic2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bicubic2d_backward.grad_input(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, bool, c10::optional, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_bicubic2d_backward + operator_name: upsample_bicubic2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_bicubic2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, bool align_corners, float? scales_h=None, float? 
scales_w=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, bool, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 4 + type: IntArrayRef + - annotation: null + dynamic_type: bool + is_nullable: false + name: align_corners + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_h + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: double + is_nullable: true + name: scales_w + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: upsample_trilinear3d_out + operator_name: upsample_trilinear3d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::upsample_trilinear3d.out(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
[Auto-generated PyTorch v1.7.0 operator declarations (YAML), continued. The original hunk lines were flattened during extraction; this portion adds the remainder of the preceding out-overload entry and the full declaration entries (arguments, schema_order_cpp_signature, return types, dispatcher flags) for the operators below, listed by their aten schema strings. Each functional variant is accompanied by its corresponding `.out` / `.grad_input` (or `.grad_output` / `.output_mask`) overload entry:

- aten::upsample_trilinear3d(Tensor self, int[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
- aten::upsample_trilinear3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
- aten::upsample_nearest1d(Tensor self, int[1] output_size, float? scales=None) -> Tensor
- aten::upsample_nearest1d_backward(Tensor grad_output, int[1] output_size, int[3] input_size, float? scales=None) -> Tensor
- aten::upsample_nearest2d(Tensor self, int[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor
- aten::upsample_nearest2d_backward(Tensor grad_output, int[2] output_size, int[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor
- aten::upsample_nearest3d(Tensor self, int[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
- aten::upsample_nearest3d_backward(Tensor grad_output, int[3] output_size, int[5] input_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor
- aten::sigmoid_backward(Tensor grad_output, Tensor output) -> Tensor
- aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor
- aten::tanh_backward(Tensor grad_output, Tensor output) -> Tensor
- aten::slow_conv_transpose2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] output_padding=0, int[2] dilation=1) -> Tensor
- aten::slow_conv_transpose2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)
- aten::slow_conv_transpose3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, int[3] output_padding=0, int[3] dilation=1) -> Tensor
- aten::slow_conv_transpose3d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
- aten::slow_conv_transpose3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)

The nullable scale and eps parameters (`float?` in the schema strings) appear in each entry as `dynamic_type: double`, `is_nullable: true`, `type: c10::optional` with `default: c10::nullopt`; the optional `bias` tensors appear as `is_nullable: true` with `type: const c10::optional&`. The declarations for the last entry continue below.]
dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: grad_input + name: grad_input + type: Tensor + - dynamic_type: Tensor + field_name: grad_weight + name: grad_weight + type: Tensor + - dynamic_type: Tensor + field_name: grad_bias + name: grad_bias + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv2d_out + operator_name: thnn_conv2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv2d + operator_name: thnn_conv2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv2d_forward_out + operator_name: thnn_conv2d_forward + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv2d_forward.output(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: finput + output: true + type: Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: Tensor + is_nullable: false + name: fgrad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: finput + output: true + type: Tensor & + - allocate: true + annotation: c! + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: finput + type: Tensor & + - dynamic_type: Tensor + name: fgrad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv2d_forward + operator_name: thnn_conv2d_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias, int[2] stride, int[2] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: output + name: output + type: Tensor + - dynamic_type: Tensor + field_name: finput + name: finput + type: Tensor + - dynamic_type: Tensor + field_name: fgrad_input + name: fgrad_input + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv2d_backward_out + operator_name: thnn_conv2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: true + name: grad_input + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: true + name: grad_weight + output: true + type: Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: Tensor + is_nullable: true + name: grad_bias + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: true + name: grad_input + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: true + name: grad_weight + output: true + type: Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: Tensor + is_nullable: true + name: grad_bias + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + - dynamic_type: Tensor + name: grad_weight + type: Tensor & + - dynamic_type: Tensor + name: grad_bias + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv2d_backward + operator_name: thnn_conv2d_backward + overload_name: output_mask + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: grad_input + name: grad_input + type: Tensor + - dynamic_type: Tensor + field_name: grad_weight + name: grad_weight + type: Tensor + - dynamic_type: Tensor + field_name: grad_bias + name: grad_bias + type: Tensor + inplace: false + is_factory_method: false + abstract: 
true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv_depthwise2d_out + operator_name: thnn_conv_depthwise2d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv_depthwise2d.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv_depthwise2d + operator_name: thnn_conv_depthwise2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv_depthwise2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv_depthwise2d_forward_out + operator_name: thnn_conv_depthwise2d_forward + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv_depthwise2d_forward.out(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv_depthwise2d_forward + operator_name: thnn_conv_depthwise2d_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv_depthwise2d_forward(Tensor self, Tensor weight, int[2] kernel_size, Tensor? 
bias, int[2] stride, int[2] padding, int[2] dilation) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv_depthwise2d_backward_out + operator_name: thnn_conv_depthwise2d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight) -> (Tensor(a!), Tensor(b!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: true + name: grad_input + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: true + name: grad_weight + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: true + name: grad_input + output: true + type: Tensor & + - allocate: true + annotation: b! 
+ dynamic_type: Tensor + is_nullable: true + name: grad_weight + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + - dynamic_type: Tensor + name: grad_weight + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: thnn_conv_depthwise2d_backward + operator_name: thnn_conv_depthwise2d_backward + overload_name: output_mask + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::thnn_conv_depthwise2d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[2] output_mask) -> (Tensor grad_input, Tensor grad_weight) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: grad_input + name: grad_input + type: Tensor + - dynamic_type: Tensor + field_name: grad_weight + name: grad_weight + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv3d_out + operator_name: slow_conv3d + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: 
aten::slow_conv3d.out(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias=None, int[3] stride=1, int[3] padding=0, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv3d + operator_name: slow_conv3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv3d_forward_out + operator_name: slow_conv3d_forward + overload_name: output + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv3d_forward.output(Tensor self, Tensor weight, int[3] kernel_size, Tensor? bias, int[3] stride, int[3] padding, *, Tensor(a!) output, Tensor(b!) finput, Tensor(c!) fgrad_input) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: finput + output: true + type: Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: Tensor + is_nullable: false + name: fgrad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, IntArrayRef, const Tensor &, IntArrayRef, IntArrayRef, Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: output + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: false + name: finput + output: true + type: Tensor & + - allocate: true + annotation: c! + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: output + type: Tensor & + - dynamic_type: Tensor + name: finput + type: Tensor & + - dynamic_type: Tensor + name: fgrad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv3d_forward + operator_name: slow_conv3d_forward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv3d_forward(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias, int[3] stride, int[3] padding) -> (Tensor output, Tensor finput, Tensor fgrad_input) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: output + name: output + type: Tensor + - dynamic_type: Tensor + field_name: finput + name: finput + type: Tensor + - dynamic_type: Tensor + field_name: fgrad_input + name: fgrad_input + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv3d_backward_out + operator_name: slow_conv3d_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!)) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: true + name: grad_input + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: true + name: grad_weight + output: true + type: Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: Tensor + is_nullable: true + name: grad_bias + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, Tensor &, Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: true + name: grad_input + output: true + type: Tensor & + - allocate: true + annotation: b! + dynamic_type: Tensor + is_nullable: true + name: grad_weight + output: true + type: Tensor & + - allocate: true + annotation: c! 
+ dynamic_type: Tensor + is_nullable: true + name: grad_bias + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + - dynamic_type: Tensor + name: grad_weight + type: Tensor & + - dynamic_type: Tensor + name: grad_bias + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv3d_backward + operator_name: slow_conv3d_backward + overload_name: output_mask + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv3d_backward.output_mask(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, const Tensor &, const Tensor &, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: finput + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: fgrad_input + type: const Tensor & + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: grad_input + name: grad_input + type: Tensor + - dynamic_type: Tensor + field_name: grad_weight + name: grad_weight + type: Tensor + - dynamic_type: Tensor + field_name: grad_bias + name: grad_bias + type: Tensor + inplace: false + is_factory_method: false + abstract: 
true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_dilated2d + operator_name: slow_conv_dilated2d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv_dilated2d(Tensor self, Tensor weight, int[2] kernel_size, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_dilated2d_backward + operator_name: slow_conv_dilated2d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv_dilated2d_backward(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: 
IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: grad_input + name: grad_input + type: Tensor + - dynamic_type: Tensor + field_name: grad_weight + name: grad_weight + type: Tensor + - dynamic_type: Tensor + field_name: grad_bias + name: grad_bias + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_dilated3d + operator_name: slow_conv_dilated3d + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv_dilated3d(Tensor self, Tensor weight, int[3] kernel_size, Tensor? 
bias=None, int[3] stride=1, int[3] padding=0, int[3] dilation=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, IntArrayRef, const c10::optional&, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + default: '{}' + dynamic_type: Tensor + is_nullable: true + name: bias + type: const c10::optional& + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + default: 0 + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + default: 1 + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: slow_conv_dilated3d_backward + operator_name: slow_conv_dilated3d_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::slow_conv_dilated3d_backward(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias) + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: 
output_mask + type: std::array + schema_order_cpp_signature: std::tuple (const Tensor &, const Tensor &, const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, std::array) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: weight + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 3 + type: IntArrayRef + - annotation: null + dynamic_type: std::array + is_nullable: false + name: output_mask + type: std::array + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + field_name: grad_input + name: grad_input + type: Tensor + - dynamic_type: Tensor + field_name: grad_weight + name: grad_weight + type: Tensor + - dynamic_type: Tensor + field_name: grad_bias + name: grad_bias + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: col2im_out + operator_name: col2im + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::col2im.out(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: col2im + operator_name: col2im + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::col2im(Tensor self, int[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: output_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null 
+ dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: col2im_backward_out + operator_name: col2im_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::col2im_backward.grad_input(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: col2im_backward + operator_name: col2im_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::col2im_backward(Tensor grad_output, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: im2col_out + operator_name: im2col + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: im2col + operator_name: im2col + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true 
+ device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: im2col_backward_out + operator_name: im2col_backward + overload_name: grad_input + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::im2col_backward.grad_input(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) grad_input) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor & (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: grad_input + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: grad_input + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: im2col_backward + operator_name: im2col_backward + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::im2col_backward(Tensor grad_output, int[2] input_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + schema_order_cpp_signature: Tensor (const Tensor &, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef, IntArrayRef) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: grad_output + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: input_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: kernel_size + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: dilation + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: padding + size: 2 + type: IntArrayRef + - annotation: null + dynamic_type: IntArrayRef + is_nullable: false + name: stride + size: 2 + type: IntArrayRef + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isfinite + operator_name: isfinite + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isfinite(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: isinf + operator_name: isinf + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + 
matches_jit_signature: true + schema_string: aten::isinf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: false + with_gil: false + deprecated: false + has_math_kernel: false +- name: isposinf + operator_name: isposinf + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isposinf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isposinf_out + operator_name: isposinf + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isposinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isneginf + operator_name: isneginf + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isneginf(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: isneginf_out + operator_name: isneginf + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _add_batch_dim + operator_name: _add_batch_dim + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_dim + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _remove_batch_dim + operator_name: _remove_batch_dim + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_remove_batch_dim(Tensor self, int level, int batch_size, int out_dim) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_dim + type: int64_t + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, int64_t, int64_t) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: level + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: batch_size + type: int64_t + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: out_dim + type: int64_t + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_fft + operator_name: fft_fft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_fft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_ifft + operator_name: fft_ifft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_ifft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_rfft + operator_name: fft_rfft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_rfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_irfft + operator_name: fft_irfft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_irfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_hfft + operator_name: fft_hfft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_hfft(Tensor self, int? n=None, int dim=-1, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_ihfft + operator_name: fft_ihfft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_ihfft(Tensor self, int? n=None, int dim=-1, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, int64_t, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: int64_t + is_nullable: true + name: n + type: c10::optional + - annotation: null + default: -1 + dynamic_type: int64_t + is_nullable: false + name: dim + type: int64_t + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_fftn + operator_name: fft_fftn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_fftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_ifftn + operator_name: fft_ifftn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_ifftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_rfftn + operator_name: fft_rfftn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_rfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft_irfftn + operator_name: fft_irfftn + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft_irfftn(Tensor self, int[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: s + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: std::string + is_nullable: true + name: norm + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: fft + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: fft + operator_name: fft + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: 
null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + schema_order_cpp_signature: Tensor (const Tensor &, int64_t, bool) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: int64_t + is_nullable: false + name: signal_ndim + type: int64_t + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: normalized + type: bool + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_det + operator_name: linalg_det + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linalg_det(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: det + operator_name: det + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::det(Tensor self) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: outer + operator_name: outer + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::outer(Tensor self, Tensor vec2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: 
false + has_math_kernel: false +- name: outer_out + operator_name: outer + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ger + operator_name: ger + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ger(Tensor self, Tensor vec2) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + method_of: + - Type + - Tensor + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: ger_out + operator_name: ger + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + schema_order_cpp_signature: Tensor & (const Tensor &, const Tensor &, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: vec2 + type: const Tensor & + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_norm + operator_name: linalg_norm + overload_name: '' + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: ord + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional, c10::optional, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: ord + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_norm + operator_name: linalg_norm + overload_name: ord_str + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: std::string + is_nullable: false + name: ord + type: std::string + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, std::string, c10::optional, bool, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: std::string + is_nullable: false + name: ord + type: std::string + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_norm_out + operator_name: linalg_norm + overload_name: out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: ord + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, c10::optional, c10::optional, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + default: c10::nullopt + dynamic_type: Scalar + is_nullable: true + name: ord + type: c10::optional + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: linalg_norm_out + operator_name: linalg_norm + overload_name: ord_str_out + use_c10_dispatcher: with_codegenerated_unboxing_wrapper + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!) + arguments: + - allocate: true + annotation: a! 
+ dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: std::string + is_nullable: false + name: ord + type: std::string + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + schema_order_cpp_signature: Tensor & (const Tensor &, std::string, c10::optional, bool, c10::optional, Tensor &) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: std::string + is_nullable: false + name: ord + type: std::string + - annotation: null + default: c10::nullopt + dynamic_type: IntArrayRef + is_nullable: true + name: dim + size: 1 + type: c10::optional + - annotation: null + default: false + dynamic_type: bool + is_nullable: false + name: keepdim + type: bool + - annotation: null + default: c10::nullopt + dynamic_type: ScalarType + is_nullable: true + kwarg_only: true + name: dtype + type: c10::optional + - allocate: true + annotation: a! + dynamic_type: Tensor + is_nullable: false + name: out + output: true + type: Tensor & + method_of: + - Type + - namespace + mode: native + python_module: linalg + returns: + - dynamic_type: Tensor + name: out + type: Tensor & + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _test_serialization_subcmul + operator_name: _test_serialization_subcmul + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + schema_order_cpp_signature: Tensor (const Tensor &, const Tensor &, Scalar) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: self + type: const Tensor & + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: other + type: const Tensor & + - annotation: null + default: 1 + dynamic_type: Scalar + is_nullable: false + name: alpha + type: Scalar + method_of: + - Type + - namespace + mode: native + python_module: '' + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: false + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _test_optional_intlist + operator_name: _test_optional_intlist + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_test_optional_intlist(Tensor values, int[]? 
addends) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: addends + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: addends + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _test_optional_filled_intlist + operator_name: _test_optional_filled_intlist + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_test_optional_filled_intlist(Tensor values, int[2]? addends) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: addends + size: 2 + type: c10::optional + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: IntArrayRef + is_nullable: true + name: addends + size: 2 + type: c10::optional + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false +- name: _test_optional_floatlist + operator_name: _test_optional_floatlist + overload_name: '' + use_c10_dispatcher: full + manual_kernel_registration: false + category_override: '' + matches_jit_signature: true + schema_string: aten::_test_optional_floatlist(Tensor values, float[]? addends) -> Tensor + arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: addends + type: c10::optional> + schema_order_cpp_signature: Tensor (const Tensor &, c10::optional>) + schema_order_arguments: + - annotation: null + dynamic_type: Tensor + is_nullable: false + name: values + type: const Tensor & + - annotation: null + dynamic_type: ArrayRef + is_nullable: true + name: addends + type: c10::optional> + method_of: + - Type + - namespace + mode: native + python_module: nn + returns: + - dynamic_type: Tensor + name: result + type: Tensor + inplace: false + is_factory_method: false + abstract: true + device_guard: true + with_gil: false + deprecated: false + has_math_kernel: false diff --git a/libtch/c-generated.go b/libtch/c-generated.go index e7361bc..cb34472 100644 --- a/libtch/c-generated.go +++ b/libtch/c-generated.go @@ -2,5485 +2,6719 @@ package libtch // NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! 
-//#include "stdbool.h"
-//#include "torch_api.h"
+//#include "stdbool.h"
+//#include "torch_api.h"
 import "C"
 import "unsafe"
-func Atg__And_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___and__(ptr, self, other)
+func Atg__And_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___and__(ptr, self, other )
 }
-func Atg__And1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___and__1(ptr, self, other)
+func Atg__And1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___and__1(ptr, self, other)
 }
-func Atg__Iand_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___iand__(ptr, self, other)
+func Atg__Iand_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___iand__(ptr, self, other )
 }
-func Atg__Iand1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___iand__1(ptr, self, other)
+func Atg__Iand1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___iand__1(ptr, self, other)
 }
-func Atg__Ilshift_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___ilshift__(ptr, self, other)
+func Atg__Ilshift_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___ilshift__(ptr, self, other )
 }
-func Atg__Ilshift1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___ilshift__1(ptr, self, other)
+func Atg__Ilshift1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___ilshift__1(ptr, self, other)
 }
-func Atg__Ior_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___ior__(ptr, self, other)
+func Atg__Ior_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___ior__(ptr, self, other )
 }
-func Atg__Ior1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___ior__1(ptr, self, other)
+func Atg__Ior1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___ior__1(ptr, self, other)
 }
-func Atg__Irshift_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___irshift__(ptr, self, other)
+func Atg__Irshift_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___irshift__(ptr, self, other )
 }
-func Atg__Irshift1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___irshift__1(ptr, self, other)
+func Atg__Irshift1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___irshift__1(ptr, self, other)
 }
-func Atg__Ixor_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___ixor__(ptr, self, other)
+func Atg__Ixor_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___ixor__(ptr, self, other )
 }
-func Atg__Ixor1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___ixor__1(ptr, self, other)
+func Atg__Ixor1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___ixor__1(ptr, self, other)
 }
-func Atg__Lshift_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___lshift__(ptr, self, other)
+func Atg__Lshift_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___lshift__(ptr, self, other )
 }
-func Atg__Lshift1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___lshift__1(ptr, self, other)
+func Atg__Lshift1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___lshift__1(ptr, self, other)
 }
-func Atg__Or_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___or__(ptr, self, other)
+func Atg__Or_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___or__(ptr, self, other )
 }
-func Atg__Or1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___or__1(ptr, self, other)
+func Atg__Or1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___or__1(ptr, self, other)
 }
-func Atg__Rshift_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___rshift__(ptr, self, other)
+func Atg__Rshift_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___rshift__(ptr, self, other )
 }
-func Atg__Rshift1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___rshift__1(ptr, self, other)
+func Atg__Rshift1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___rshift__1(ptr, self, other)
 }
-func Atg__Xor_(ptr *Ctensor, self Ctensor, other Cscalar) {
-	C.atg___xor__(ptr, self, other)
+func Atg__Xor_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg___xor__(ptr, self, other )
 }
-func Atg__Xor1(ptr *Ctensor, self Ctensor, other Ctensor) {
-	C.atg___xor__1(ptr, self, other)
+func Atg__Xor1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg___xor__1(ptr, self, other)
 }
-func Atg_AdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) {
-	coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
-	coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
-	C.atg__adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
+func Atg_AdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){
+coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
+coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
+C.atg__adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
 }
-func Atg_AdaptiveAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor) {
-	C.atg__adaptive_avg_pool2d_backward(ptr, gradOutput, self)
+func Atg_AdaptiveAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){
+C.atg__adaptive_avg_pool2d_backward(ptr, gradOutput, self)
 }
-func Atg_Addr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) {
-	C.atg__addr(ptr, self, vec1, vec2)
+func Atg_AddBatchDim(ptr *Ctensor, self Ctensor, batchDim int64, level int64){
+cbatchDim := *(*C.int64_t)(unsafe.Pointer(&batchDim))
+clevel := *(*C.int64_t)(unsafe.Pointer(&level))
+C.atg__add_batch_dim(ptr, self, cbatchDim, clevel)
 }
-func Atg_Addr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) {
-	C.atg__addr_(ptr, self, vec1, vec2)
+func Atg_AddRelu(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg__add_relu(ptr, self, other)
 }
-func Atg_AddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) {
-	C.atg__addr_out(ptr, out, self, vec1, vec2)
-}
-func Atg_AmpUpdateScale(ptr *Ctensor, growthTracker Ctensor, currentScale Ctensor, foundInf Ctensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) {
-	cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor))
-	cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor))
-	cgrowthInterval := *(*C.int64_t)(unsafe.Pointer(&growthInterval))
-	C.atg__amp_update_scale(ptr, growthTracker, currentScale, foundInf, cscaleGrowthFactor, cscaleBackoffFactor, cgrowthInterval)
-}
-func Atg_BaddbmmMkl_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) {
-	C.atg__baddbmm_mkl_(ptr, self, batch1, batch2)
-}
-func Atg_CastByte(ptr *Ctensor, self Ctensor, nonBlocking int32) {
-	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-	C.atg__cast_byte(ptr, self, cnonBlocking)
-}
-func Atg_CastChar(ptr *Ctensor, self Ctensor, nonBlocking int32) {
-	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-	C.atg__cast_char(ptr, self, cnonBlocking)
-}
-func Atg_CastDouble(ptr *Ctensor, self Ctensor, nonBlocking int32) {
-	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-	C.atg__cast_double(ptr, self, cnonBlocking)
-}
-func Atg_CastFloat(ptr *Ctensor, self Ctensor, nonBlocking int32) {
-	cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
-	C.atg__cast_float(ptr, self, cnonBlocking)
-}
-func Atg_CastHalf(ptr *Ctensor, self Ctensor,
nonBlocking int32) { - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - C.atg__cast_half(ptr, self, cnonBlocking) -} -func Atg_CastInt(ptr *Ctensor, self Ctensor, nonBlocking int32) { - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - C.atg__cast_int(ptr, self, cnonBlocking) -} -func Atg_CastLong(ptr *Ctensor, self Ctensor, nonBlocking int32) { - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - C.atg__cast_long(ptr, self, cnonBlocking) -} -func Atg_CastShort(ptr *Ctensor, self Ctensor, nonBlocking int32) { - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - C.atg__cast_short(ptr, self, cnonBlocking) -} -func Atg_Cat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) -} -func Atg_CatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) -} -func Atg_CdistBackward(ptr *Ctensor, grad Ctensor, x1 Ctensor, x2 Ctensor, p float64, cdist Ctensor) { - cp := *(*C.double)(unsafe.Pointer(&p)) - C.atg__cdist_backward(ptr, grad, x1, x2, cp, cdist) -} -func Atg_CholeskyHelper(ptr *Ctensor, self Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg__cholesky_helper(ptr, self, cupper) -} -func Atg_CholeskySolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg__cholesky_solve_helper(ptr, self, a, cupper) -} -func Atg_Coalesced_(ptr *Ctensor, self Ctensor, coalesced int32) { - ccoalesced := *(*C.int)(unsafe.Pointer(&coalesced)) - C.atg__coalesced_(ptr, self, ccoalesced) -} -func Atg_Convolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) - C.atg__convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cbenchmark, cdeterministic, ccudnnEnabled) -} -func Atg_ConvolutionNogroup(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData 
[]int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - C.atg__convolution_nogroup(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen) -} -func Atg_CopyFrom(ptr *Ctensor, self Ctensor, dst Ctensor, nonBlocking int32) { - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - C.atg__copy_from(ptr, self, dst, cnonBlocking) -} -func Atg_CtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, zeroInfinity int32) { - cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) - cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) - ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) - ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) - cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) - czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) - C.atg__ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, czeroInfinity) -} -func Atg_CtcLossBackward(ptr *Ctensor, grad Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, negLogLikelihood Ctensor, logAlpha Ctensor, blank int64, zeroInfinity int32) { - cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) - cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) - ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) - ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) - cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) - czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) - C.atg__ctc_loss_backward(ptr, grad, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, negLogLikelihood, logAlpha, cblank, czeroInfinity) -} -func Atg_CudnnCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, deterministic int32, zeroInfinity int32) { - cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) - cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) - ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) - ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) - cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) - C.atg__cudnn_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, cdeterministic, czeroInfinity) -} -func 
Atg_CudnnInitDropoutState(ptr *Ctensor, dropout float64, train int32, dropoutSeed int64, optionsKind int32, optionsDevice int32) { - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cdropoutSeed := *(*C.int64_t)(unsafe.Pointer(&dropoutSeed)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg__cudnn_init_dropout_state(ptr, cdropout, ctrain, cdropoutSeed, coptionsKind, coptionsDevice) -} -func Atg_CudnnRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, weightBuf Ctensor, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor) { - cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) - cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) - cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) - cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) - C.atg__cudnn_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, weightBuf, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) -} -func Atg_CudnnRnnFlattenWeight(ptr *Ctensor, weightArrData []Ctensor, weightArrLen int, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, bidirectional int32) { - cweightArrDataPtr := (*Ctensor)(unsafe.Pointer(&weightArrData[0])) - cweightArrLen := *(*C.int)(unsafe.Pointer(&weightArrLen)) - cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) - cinputSize := *(*C.int64_t)(unsafe.Pointer(&inputSize)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - C.atg__cudnn_rnn_flatten_weight(ptr, cweightArrDataPtr, cweightArrLen, cweightStride0, cinputSize, cmode, chiddenSize, cnumLayers, cbatchFirst, cbidirectional) -} -func Atg_Cumprod(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__cumprod(ptr, self, cdim) -} -func Atg_CumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__cumprod_out(ptr, out, self, cdim) -} -func Atg_Cumsum(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__cumsum(ptr, self, cdim) -} -func Atg_CumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__cumsum_out(ptr, out, self, cdim) -} -func Atg_DimArange(ptr *Ctensor, like Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__dim_arange(ptr, like, cdim) -} -func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor) { - 
C.atg__dirichlet_grad(ptr, x, alpha, total) -} -func Atg_EmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32) { - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - csparse := *(*C.int)(unsafe.Pointer(&sparse)) - cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) - C.atg__embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) -} -func Atg_EmbeddingBagBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor) { - cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - csparse := *(*C.int)(unsafe.Pointer(&sparse)) - C.atg__embedding_bag_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, csparse, perSampleWeights) -} -func Atg_EmbeddingBagDenseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor) { - cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - C.atg__embedding_bag_dense_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) -} -func Atg_EmbeddingBagPerSampleWeightsBackward(ptr *Ctensor, grad Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, mode int64) { - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - C.atg__embedding_bag_per_sample_weights_backward(ptr, grad, weight, indices, offsets, offset2bag, cmode) -} -func Atg_EmbeddingBagSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor) { - cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - C.atg__embedding_bag_sparse_backward(ptr, grad, indices, offsets, offset2bag, bagSize, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) -} -func Atg_EmptyAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32, scale float64, zeroPoint int64) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - cscale := *(*C.double)(unsafe.Pointer(&scale)) - czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) - C.atg__empty_affine_quantized(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice, cscale, czeroPoint) -} -func Atg_EmptyPerChannelAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, scales Ctensor, zeroPoints Ctensor, axis int64, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := 
*(*C.int)(unsafe.Pointer(&sizeLen)) - caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg__empty_per_channel_affine_quantized(ptr, csizeDataPtr, csizeLen, scales, zeroPoints, caxis, coptionsKind, coptionsDevice) -} -func Atg_FftWithSize(ptr *Ctensor, self Ctensor, signalNdim int64, complexInput int32, complexOutput int32, inverse int32, checkedSignalSizesData []int64, checkedSignalSizesLen int, normalized int32, onesided int32, outputSizesData []int64, outputSizesLen int) { - csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) - ccomplexInput := *(*C.int)(unsafe.Pointer(&complexInput)) - ccomplexOutput := *(*C.int)(unsafe.Pointer(&complexOutput)) - cinverse := *(*C.int)(unsafe.Pointer(&inverse)) - ccheckedSignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&checkedSignalSizesData[0])) - ccheckedSignalSizesLen := *(*C.int)(unsafe.Pointer(&checkedSignalSizesLen)) - cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) - conesided := *(*C.int)(unsafe.Pointer(&onesided)) - coutputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizesData[0])) - coutputSizesLen := *(*C.int)(unsafe.Pointer(&outputSizesLen)) - C.atg__fft_with_size(ptr, self, csignalNdim, ccomplexInput, ccomplexOutput, cinverse, ccheckedSignalSizesDataPtr, ccheckedSignalSizesLen, cnormalized, conesided, coutputSizesDataPtr, coutputSizesLen) -} -func Atg_FusedDropout(ptr *Ctensor, self Ctensor, p float64) { - cp := *(*C.double)(unsafe.Pointer(&p)) - C.atg__fused_dropout(ptr, self, cp) -} -func Atg_GatherSparseBackward(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, grad Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__gather_sparse_backward(ptr, self, cdim, index, grad) -} -func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__index_copy_(ptr, self, cdim, index, source) -} -func Atg_IndexPutImpl_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32, unsafety int32) { - cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) - cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) - caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) - cunsafety := *(*C.int)(unsafe.Pointer(&unsafety)) - C.atg__index_put_impl_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate, cunsafety) -} -func Atg_Indices(ptr *Ctensor, self Ctensor) { - C.atg__indices(ptr, self) -} -func Atg_InverseHelper(ptr *Ctensor, self Ctensor) { - C.atg__inverse_helper(ptr, self) -} -func Atg_LogSoftmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) - C.atg__log_softmax(ptr, self, cdim, chalfToFloat) -} -func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__log_softmax_backward_data(ptr, gradOutput, output, cdim, self) -} -func Atg_LuSolveHelper(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor) { - C.atg__lu_solve_helper(ptr, self, lUData, lUPivots) -} -func Atg_LuWithInfo(ptr *Ctensor, self Ctensor, pivot int32, checkErrors int32) { - cpivot := *(*C.int)(unsafe.Pointer(&pivot)) - ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors)) - C.atg__lu_with_info(ptr, self, cpivot, ccheckErrors) -} -func Atg_MakePerChannelQuantizedTensor(ptr 
*Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64) { - caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) - C.atg__make_per_channel_quantized_tensor(ptr, self, scale, zeroPoint, caxis) -} -func Atg_MakePerTensorQuantizedTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64) { - cscale := *(*C.double)(unsafe.Pointer(&scale)) - czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) - C.atg__make_per_tensor_quantized_tensor(ptr, self, cscale, czeroPoint) -} -func Atg_MaskedScale(ptr *Ctensor, self Ctensor, mask Ctensor, scale float64) { - cscale := *(*C.double)(unsafe.Pointer(&scale)) - C.atg__masked_scale(ptr, self, mask, cscale) -} -func Atg_Max(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg__max(ptr, self, cdim, ckeepdim) -} -func Atg_MaxOut(ptr *Ctensor, max Ctensor, maxIndices Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg__max_out(ptr, max, maxIndices, self, cdim, ckeepdim) -} -func Atg_Min(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg__min(ptr, self, cdim, ckeepdim) -} -func Atg_MinOut(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg__min_out(ptr, min, minIndices, self, cdim, ckeepdim) -} -func Atg_MkldnnReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int) { - cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) - cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) - C.atg__mkldnn_reshape(ptr, self, cshapeDataPtr, cshapeLen) -} -func Atg_MkldnnTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { - cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) - cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) - C.atg__mkldnn_transpose(ptr, self, cdim0, cdim1) -} -func Atg_MkldnnTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { - cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) - cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) - C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1) -} -func Atg_Mode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg__mode(ptr, self, cdim, ckeepdim) -} -func Atg_ModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg__mode_out(ptr, values, indices, self, cdim, ckeepdim) -} -func Atg_MultinomialAliasDraw(ptr *Ctensor, j Ctensor, q Ctensor, numSamples int64) { - cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) - C.atg__multinomial_alias_draw(ptr, j, q, cnumSamples) -} -func Atg_MultinomialAliasSetup(ptr *Ctensor, probs Ctensor) { - C.atg__multinomial_alias_setup(ptr, probs) -} -func Atg_NnpackSpatialConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - 
C.atg__nnpack_spatial_convolution(ptr, input, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func Atg_NnpackSpatialConvolutionBackwardInput(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg__nnpack_spatial_convolution_backward_input(ptr, input, gradOutput, weight, cpaddingDataPtr, cpaddingLen) -} -func Atg_NnpackSpatialConvolutionBackwardWeight(ptr *Ctensor, input Ctensor, weightsizeData []int64, weightsizeLen int, gradOutput Ctensor, paddingData []int64, paddingLen int) { - cweightsizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightsizeData[0])) - cweightsizeLen := *(*C.int)(unsafe.Pointer(&weightsizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen) -} -func Atg_PackPaddedSequence(ptr *Ctensor, input Ctensor, lengths Ctensor, batchFirst int32) { - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - C.atg__pack_padded_sequence(ptr, input, lengths, cbatchFirst) -} -func Atg_PackPaddedSequenceBackward(ptr *Ctensor, grad Ctensor, inputSizeData []int64, inputSizeLen int, batchSizes Ctensor, batchFirst int32) { - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - C.atg__pack_padded_sequence_backward(ptr, grad, cinputSizeDataPtr, cinputSizeLen, batchSizes, cbatchFirst) -} -func Atg_PadPackedSequence(ptr *Ctensor, data Ctensor, batchSizes Ctensor, batchFirst int32, paddingValue Cscalar, totalLength int64) { - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - ctotalLength := *(*C.int64_t)(unsafe.Pointer(&totalLength)) - C.atg__pad_packed_sequence(ptr, data, batchSizes, cbatchFirst, paddingValue, ctotalLength) -} -func Atg_PdistBackward(ptr *Ctensor, grad Ctensor, self Ctensor, p float64, pdist Ctensor) { - cp := *(*C.double)(unsafe.Pointer(&p)) - C.atg__pdist_backward(ptr, grad, self, cp, pdist) -} -func Atg_QrHelper(ptr *Ctensor, self Ctensor, some int32) { - csome := *(*C.int)(unsafe.Pointer(&some)) - C.atg__qr_helper(ptr, self, csome) -} -func Atg_ReshapeFromTensor(ptr *Ctensor, self Ctensor, shape Ctensor) { - C.atg__reshape_from_tensor(ptr, self, shape) -} -func Atg_SWhere(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor) { - C.atg__s_where(ptr, condition, self, other) -} -func Atg_SampleDirichlet(ptr *Ctensor, self Ctensor) { - C.atg__sample_dirichlet(ptr, self) -} -func Atg_ShapeAsTensor(ptr *Ctensor, self Ctensor) { - C.atg__shape_as_tensor(ptr, self) -} -func Atg_SobolEngineDraw(ptr *Ctensor, quasi Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64, dtype int32) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) - cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg__sobol_engine_draw(ptr, quasi, cn, sobolstate, cdimension, cnumGenerated, cdtype) -} -func Atg_SobolEngineFf_(ptr *Ctensor, self Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) - 
cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) - C.atg__sobol_engine_ff_(ptr, self, cn, sobolstate, cdimension, cnumGenerated) -} -func Atg_SobolEngineInitializeState_(ptr *Ctensor, self Ctensor, dimension int64) { - cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) - C.atg__sobol_engine_initialize_state_(ptr, self, cdimension) -} -func Atg_SobolEngineScramble_(ptr *Ctensor, self Ctensor, ltm Ctensor, dimension int64) { - cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) - C.atg__sobol_engine_scramble_(ptr, self, ltm, cdimension) -} -func Atg_Softmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) - C.atg__softmax(ptr, self, cdim, chalfToFloat) -} -func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, self) -} -func Atg_SolveHelper(ptr *Ctensor, self Ctensor, a Ctensor) { - C.atg__solve_helper(ptr, self, a) -} -func Atg_SparseAddmm(ptr *Ctensor, self Ctensor, sparse Ctensor, dense Ctensor) { - C.atg__sparse_addmm(ptr, self, sparse, dense) -} -func Atg_SparseCooTensorUnsafe(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg__sparse_coo_tensor_unsafe(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func Atg_SparseCooTensorWithDims(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) - cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg__sparse_coo_tensor_with_dims(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func Atg_SparseCooTensorWithDimsAndTensors(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32) { - csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) - cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg__sparse_coo_tensor_with_dims_and_tensors(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, indices, values, coptionsKind, coptionsDevice) -} -func Atg_SparseMm(ptr *Ctensor, sparse Ctensor, dense Ctensor) { - C.atg__sparse_mm(ptr, sparse, dense) -} -func Atg_SparseSum(ptr *Ctensor, self Ctensor) { - C.atg__sparse_sum(ptr, self) -} -func Atg_SparseSum1(ptr *Ctensor, self Ctensor, dtype int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg__sparse_sum1(ptr, self, cdtype) -} -func Atg_SparseSum2(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - 
cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
- C.atg__sparse_sum2(ptr, self, cdimDataPtr, cdimLen)
-}
-func Atg_SparseSum3(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, dtype int32) {
- cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
- cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
- cdtype := *(*C.int)(unsafe.Pointer(&dtype))
- C.atg__sparse_sum3(ptr, self, cdimDataPtr, cdimLen, cdtype)
-}
-func Atg_SparseSumBackward(ptr *Ctensor, grad Ctensor, self Ctensor, dimData []int64, dimLen int) {
- cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
- cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
- C.atg__sparse_sum_backward(ptr, grad, self, cdimDataPtr, cdimLen)
-}
-func Atg_StandardGamma(ptr *Ctensor, self Ctensor) {
- C.atg__standard_gamma(ptr, self)
-}
-func Atg_StandardGammaGrad(ptr *Ctensor, self Ctensor, output Ctensor) {
- C.atg__standard_gamma_grad(ptr, self, output)
-}
-func Atg_Std(ptr *Ctensor, self Ctensor, unbiased int32) {
- cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
- C.atg__std(ptr, self, cunbiased)
-}
-func Atg_SvdHelper(ptr *Ctensor, self Ctensor, some int32, computeUv int32) {
- csome := *(*C.int)(unsafe.Pointer(&some))
- ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv))
- C.atg__svd_helper(ptr, self, csome, ccomputeUv)
-}
-func Atg_SymeigHelper(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32) {
- ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors))
- cupper := *(*C.int)(unsafe.Pointer(&upper))
- C.atg__symeig_helper(ptr, self, ceigenvectors, cupper)
-}
-func Atg_TriangularSolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32) {
- cupper := *(*C.int)(unsafe.Pointer(&upper))
- ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
- cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular))
- C.atg__triangular_solve_helper(ptr, self, a, cupper, ctranspose, cunitriangular)
-}
-func Atg_Trilinear(ptr *Ctensor, i1 Ctensor, i2 Ctensor, i3 Ctensor, expand1Data []int64, expand1Len int, expand2Data []int64, expand2Len int, expand3Data []int64, expand3Len int, sumdimData []int64, sumdimLen int, unrollDim int64) {
- cexpand1DataPtr := (*C.int64_t)(unsafe.Pointer(&expand1Data[0]))
- cexpand1Len := *(*C.int)(unsafe.Pointer(&expand1Len))
- cexpand2DataPtr := (*C.int64_t)(unsafe.Pointer(&expand2Data[0]))
- cexpand2Len := *(*C.int)(unsafe.Pointer(&expand2Len))
- cexpand3DataPtr := (*C.int64_t)(unsafe.Pointer(&expand3Data[0]))
- cexpand3Len := *(*C.int)(unsafe.Pointer(&expand3Len))
- csumdimDataPtr := (*C.int64_t)(unsafe.Pointer(&sumdimData[0]))
- csumdimLen := *(*C.int)(unsafe.Pointer(&sumdimLen))
- cunrollDim := *(*C.int64_t)(unsafe.Pointer(&unrollDim))
- C.atg__trilinear(ptr, i1, i2, i3, cexpand1DataPtr, cexpand1Len, cexpand2DataPtr, cexpand2Len, cexpand3DataPtr, cexpand3Len, csumdimDataPtr, csumdimLen, cunrollDim)
-}
-func Atg_Unique(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32) {
- csorted := *(*C.int)(unsafe.Pointer(&sorted))
- creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse))
- C.atg__unique(ptr, self, csorted, creturnInverse)
-}
-func Atg_Unique2(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32, returnCounts int32) {
- csorted := *(*C.int)(unsafe.Pointer(&sorted))
- creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse))
- creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts))
- C.atg__unique2(ptr, self, csorted, creturnInverse, creturnCounts)
-}
-func Atg_UnsafeView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int) {
- csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
- csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
- C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen)
-}
-func Atg_Values(ptr *Ctensor, self Ctensor) {
- C.atg__values(ptr, self)
-}
-func Atg_Var(ptr *Ctensor, self Ctensor, unbiased int32) {
- cunbiased := *(*C.int)(unsafe.Pointer(&unbiased))
- C.atg__var(ptr, self, cunbiased)
-}
-func Atg_WeightNorm(ptr *Ctensor, v Ctensor, g Ctensor, dim int64) {
- cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
- C.atg__weight_norm(ptr, v, g, cdim)
-}
-func Atg_WeightNormCudaInterface(ptr *Ctensor, v Ctensor, g Ctensor, dim int64) {
- cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
- C.atg__weight_norm_cuda_interface(ptr, v, g, cdim)
-}
-func Atg_WeightNormCudaInterfaceBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64) {
- cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
- C.atg__weight_norm_cuda_interface_backward(ptr, gradW, savedV, savedG, savedNorms, cdim)
-}
-func Atg_WeightNormDifferentiableBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64) {
- cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
- C.atg__weight_norm_differentiable_backward(ptr, gradW, savedV, savedG, savedNorms, cdim)
-}
-func AtgAbs(ptr *Ctensor, self Ctensor) {
- C.atg_abs(ptr, self)
-}
-func AtgAbs_(ptr *Ctensor, self Ctensor) {
- C.atg_abs_(ptr, self)
-}
-func AtgAbsOut(ptr *Ctensor, out Ctensor, self Ctensor) {
- C.atg_abs_out(ptr, out, self)
-}
-func AtgAcos(ptr *Ctensor, self Ctensor) {
- C.atg_acos(ptr, self)
-}
-func AtgAcos_(ptr *Ctensor, self Ctensor) {
- C.atg_acos_(ptr, self)
-}
-func AtgAcosOut(ptr *Ctensor, out Ctensor, self Ctensor) {
- C.atg_acos_out(ptr, out, self)
+func Atg_AddRelu_(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg__add_relu_(ptr, self, other)
 }
-func AtgAdaptiveAvgPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) {
- coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
- coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
- C.atg_adaptive_avg_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
+func Atg_AddReluOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
+C.atg__add_relu_out(ptr, out, self, other)
 }
-func AtgAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) {
- coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
- coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
- C.atg_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
+func Atg_AddmvImpl_(ptr *Ctensor, self Ctensor, self2 Ctensor, mat Ctensor, vec Ctensor){
+C.atg__addmv_impl_(ptr, self, self2, mat, vec)
 }
-func AtgAdaptiveAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) {
- coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
- coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
- C.atg_adaptive_avg_pool2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen)
+func Atg_Aminmax(ptr *Ctensor, self Ctensor){
+C.atg__aminmax(ptr, self)
 }
-func AtgAdaptiveAvgPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) {
- coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
- coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
- C.atg_adaptive_avg_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
+func Atg_Aminmax1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg__aminmax1(ptr, self, cdim, ckeepdim)
 }
-func AtgAdaptiveAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor) {
- C.atg_adaptive_avg_pool3d_backward(ptr, gradOutput, self)
+func Atg_AmpUpdateScale(ptr *Ctensor, growthTracker Ctensor, currentScale Ctensor, foundInf Ctensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64){
+cscaleGrowthFactor := *(*C.double)(unsafe.Pointer(&scaleGrowthFactor))
+cscaleBackoffFactor := *(*C.double)(unsafe.Pointer(&scaleBackoffFactor))
+cgrowthInterval := *(*C.int64_t)(unsafe.Pointer(&growthInterval))
+C.atg__amp_update_scale(ptr, growthTracker, currentScale, foundInf, cscaleGrowthFactor, cscaleBackoffFactor, cgrowthInterval)
 }
-func AtgAdaptiveAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor) {
- C.atg_adaptive_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self)
+func Atg_BaddbmmMkl_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){
+C.atg__baddbmm_mkl_(ptr, self, batch1, batch2)
 }
-func AtgAdaptiveAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) {
- coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
- coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
- C.atg_adaptive_avg_pool3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen)
+func Atg_Bmm(ptr *Ctensor, self Ctensor, mat2 Ctensor, deterministic int32){
+cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
+C.atg__bmm(ptr, self, mat2, cdeterministic)
 }
-func AtgAdaptiveMaxPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) {
- coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0]))
- coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen))
- C.atg_adaptive_max_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen)
+func Atg_BmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor, deterministic int32){
+cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
+C.atg__bmm_out(ptr, out, self, mat2, cdeterministic)
+}
+func Atg_CastByte(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_byte(ptr, self, cnonBlocking)
+}
+func Atg_CastChar(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_char(ptr, self, cnonBlocking)
+}
+func Atg_CastDouble(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_double(ptr, self, cnonBlocking)
+}
+func Atg_CastFloat(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_float(ptr, self, cnonBlocking)
+}
+func Atg_CastHalf(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_half(ptr, self, cnonBlocking)
+}
+func Atg_CastInt(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_int(ptr, self, cnonBlocking)
+}
+func Atg_CastLong(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_long(ptr, self, cnonBlocking)
+}
+func Atg_CastShort(ptr *Ctensor, self Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__cast_short(ptr, self, cnonBlocking)
+}
+func Atg_Cat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
+ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
+ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg__cat(ptr, ctensorsDataPtr, ctensorsLen, cdim)
+}
+func Atg_CatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){
+ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0]))
+ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen))
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg__cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim)
+}
+func Atg_CdistBackward(ptr *Ctensor, grad Ctensor, x1 Ctensor, x2 Ctensor, p float64, cdist Ctensor){
+cp := *(*C.double)(unsafe.Pointer(&p))
+C.atg__cdist_backward(ptr, grad, x1, x2, cp, cdist)
+}
+func Atg_CholeskyHelper(ptr *Ctensor, self Ctensor, upper int32){
+cupper := *(*C.int)(unsafe.Pointer(&upper))
+C.atg__cholesky_helper(ptr, self, cupper)
+}
+func Atg_CholeskySolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32){
+cupper := *(*C.int)(unsafe.Pointer(&upper))
+C.atg__cholesky_solve_helper(ptr, self, a, cupper)
+}
+func Atg_Coalesced_(ptr *Ctensor, self Ctensor, coalesced int32){
+ccoalesced := *(*C.int)(unsafe.Pointer(&coalesced))
+C.atg__coalesced_(ptr, self, ccoalesced)
+}
+func Atg_ComputeLinearCombination(ptr *Ctensor, input Ctensor, coefficients Ctensor){
+C.atg__compute_linear_combination(ptr, input, coefficients)
+}
+func Atg_ComputeLinearCombinationOut(ptr *Ctensor, out Ctensor, input Ctensor, coefficients Ctensor){
+C.atg__compute_linear_combination_out(ptr, out, input, coefficients)
+}
+func Atg_Conj(ptr *Ctensor, self Ctensor){
+C.atg__conj(ptr, self)
+}
+func Atg_Convolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32){
+cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
+cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
+cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
+cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
+cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
+cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
+ctransposed := *(*C.int)(unsafe.Pointer(&transposed))
+coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
+coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
+cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
+cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
+cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
+ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))
+C.atg__convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cbenchmark, cdeterministic, ccudnnEnabled)
+}
+func Atg_Convolution1(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64, benchmark int32, deterministic int32, cudnnEnabled int32, allowTf32 int32){
+cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
+cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
+cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
+cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
+cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
+cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
+ctransposed := *(*C.int)(unsafe.Pointer(&transposed))
+coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
+coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
+cgroups := *(*C.int64_t)(unsafe.Pointer(&groups))
+cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark))
+cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
+ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled))
+callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32))
+C.atg__convolution1(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32)
+}
+func Atg_ConvolutionNogroup(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int){
+cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0]))
+cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen))
+cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0]))
+cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen))
+cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0]))
+cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen))
+ctransposed := *(*C.int)(unsafe.Pointer(&transposed))
+coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0]))
+coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen))
+C.atg__convolution_nogroup(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen)
+}
+func Atg_CopyFrom(ptr *Ctensor, self Ctensor, dst Ctensor, nonBlocking int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+C.atg__copy_from(ptr, self, dst, cnonBlocking)
+}
+func Atg_CtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, zeroInfinity int32){
+cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
+cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
+ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
+ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
+cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
+czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
+C.atg__ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, czeroInfinity)
+}
+func Atg_CtcLossBackward(ptr *Ctensor, grad Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, negLogLikelihood Ctensor, logAlpha Ctensor, blank int64, zeroInfinity int32){
+cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
+cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
+ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
+ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
+cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
+czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
+C.atg__ctc_loss_backward(ptr, grad, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, negLogLikelihood, logAlpha, cblank, czeroInfinity)
+}
+func Atg_CudnnCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, deterministic int32, zeroInfinity int32){
+cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0]))
+cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen))
+ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0]))
+ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen))
+cblank := *(*C.int64_t)(unsafe.Pointer(&blank))
+cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic))
+czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity))
+C.atg__cudnn_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, cdeterministic, czeroInfinity)
+}
+func Atg_CudnnInitDropoutState(ptr *Ctensor, dropout float64, train int32, dropoutSeed int64, optionsKind int32, optionsDevice int32){
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cdropoutSeed := *(*C.int64_t)(unsafe.Pointer(&dropoutSeed))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg__cudnn_init_dropout_state(ptr, cdropout, ctrain, cdropoutSeed, coptionsKind, coptionsDevice)
+}
+func Atg_CudnnRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, weightBuf Ctensor, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){
+cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0]))
+cweightLen := *(*C.int)(unsafe.Pointer(&weightLen))
+cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0))
+cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
+chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0]))
+cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen))
+C.atg__cudnn_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, weightBuf, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState)
+}
+func Atg_CudnnRnnFlattenWeight(ptr *Ctensor, weightArrData []Ctensor, weightArrLen int, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, bidirectional int32){
+cweightArrDataPtr := (*Ctensor)(unsafe.Pointer(&weightArrData[0]))
+cweightArrLen := *(*C.int)(unsafe.Pointer(&weightArrLen))
+cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0))
+cinputSize := *(*C.int64_t)(unsafe.Pointer(&inputSize))
+cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
+chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+C.atg__cudnn_rnn_flatten_weight(ptr, cweightArrDataPtr, cweightArrLen, cweightStride0, cinputSize, cmode, chiddenSize, cnumLayers, cbatchFirst, cbidirectional)
+}
+func Atg_Cumprod(ptr *Ctensor, self Ctensor, dim int64){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg__cumprod(ptr, self, cdim)
+}
+func Atg_CumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg__cumprod_out(ptr, out, self, cdim)
+}
+func Atg_Cumsum(ptr *Ctensor, self Ctensor, dim int64){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg__cumsum(ptr, self, cdim)
+}
+func Atg_CumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg__cumsum_out(ptr, out, self, cdim)
+}
+func Atg_DimArange(ptr *Ctensor, like Ctensor, dim int64){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg__dim_arange(ptr, like, cdim)
+}
+func Atg_DirichletGrad(ptr *Ctensor, x Ctensor, alpha Ctensor, total Ctensor){
+C.atg__dirichlet_grad(ptr, x, alpha, total)
+}
+func Atg_EmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){
+cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
+cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
+csparse := *(*C.int)(unsafe.Pointer(&sparse))
+cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset))
+C.atg__embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset)
+}
+func Atg_EmbeddingBagBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor){
+cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))
+cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
+cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
+csparse := *(*C.int)(unsafe.Pointer(&sparse))
+C.atg__embedding_bag_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, csparse, perSampleWeights)
+}
+func Atg_EmbeddingBagDenseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, maximumIndices Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){
+cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights))
+cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
+cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
+C.atg__embedding_bag_dense_backward(ptr, grad, indices, offsets, offset2bag, bagSize, maximumIndices, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights)
+}
+func Atg_EmbeddingBagForwardOnly(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){
+cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq))
+cmode := *(*C.int64_t)(unsafe.Pointer(&mode))
+csparse := *(*C.int)(unsafe.Pointer(&sparse))
+cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset))
+C.atg__embedding_bag_forward_only(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset)
+}
+func Atg_EmbeddingBagPerSampleWeightsBackward(ptr *Ctensor, grad Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, mode int64){
+cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +C.atg__embedding_bag_per_sample_weights_backward(ptr, grad, weight, indices, offsets, offset2bag, cmode) +} +func Atg_EmbeddingBagSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, offsets Ctensor, offset2bag Ctensor, bagSize Ctensor, numWeights int64, scaleGradByFreq int32, mode int64, perSampleWeights Ctensor){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +C.atg__embedding_bag_sparse_backward(ptr, grad, indices, offsets, offset2bag, bagSize, cnumWeights, cscaleGradByFreq, cmode, perSampleWeights) +} +func Atg_EmptyAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32, scale float64, zeroPoint int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +C.atg__empty_affine_quantized(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice, cscale, czeroPoint) +} +func Atg_EmptyPerChannelAffineQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, scales Ctensor, zeroPoints Ctensor, axis int64, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__empty_per_channel_affine_quantized(ptr, csizeDataPtr, csizeLen, scales, zeroPoints, caxis, coptionsKind, coptionsDevice) +} +func Atg_EuclideanDist(ptr *Ctensor, x1 Ctensor, x2 Ctensor){ +C.atg__euclidean_dist(ptr, x1, x2) +} +func Atg_FakeQuantizeLearnablePerChannelAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg__fake_quantize_learnable_per_channel_affine(ptr, self, scale, zeroPoint, caxis, cquantMin, cquantMax) +} +func Atg_FakeQuantizeLearnablePerChannelAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg__fake_quantize_learnable_per_channel_affine_backward(ptr, grad, self, scale, zeroPoint, caxis, cquantMin, cquantMax) +} +func Atg_FakeQuantizeLearnablePerTensorAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, quantMin int64, quantMax int64){ +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg__fake_quantize_learnable_per_tensor_affine(ptr, self, scale, zeroPoint, cquantMin, cquantMax) +} +func Atg_FakeQuantizeLearnablePerTensorAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, quantMin int64, quantMax int64){ +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg__fake_quantize_learnable_per_tensor_affine_backward(ptr, grad, 
self, scale, zeroPoint, cquantMin, cquantMax) +} +func Atg_FftWithSize(ptr *Ctensor, self Ctensor, signalNdim int64, complexInput int32, complexOutput int32, inverse int32, checkedSignalSizesData []int64, checkedSignalSizesLen int, normalized int32, onesided int32, outputSizesData []int64, outputSizesLen int){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +ccomplexInput := *(*C.int)(unsafe.Pointer(&complexInput)) +ccomplexOutput := *(*C.int)(unsafe.Pointer(&complexOutput)) +cinverse := *(*C.int)(unsafe.Pointer(&inverse)) +ccheckedSignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&checkedSignalSizesData[0])) +ccheckedSignalSizesLen := *(*C.int)(unsafe.Pointer(&checkedSignalSizesLen)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +coutputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizesData[0])) +coutputSizesLen := *(*C.int)(unsafe.Pointer(&outputSizesLen)) +C.atg__fft_with_size(ptr, self, csignalNdim, ccomplexInput, ccomplexOutput, cinverse, ccheckedSignalSizesDataPtr, ccheckedSignalSizesLen, cnormalized, conesided, coutputSizesDataPtr, coutputSizesLen) +} +func Atg_FftWithSize1(ptr *Ctensor, self Ctensor, signalNdim int64, complexInput int32, complexOutput int32, inverse int32, checkedSignalSizesData []int64, checkedSignalSizesLen int, normalization int64, onesided int32, outputSizesData []int64, outputSizesLen int){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +ccomplexInput := *(*C.int)(unsafe.Pointer(&complexInput)) +ccomplexOutput := *(*C.int)(unsafe.Pointer(&complexOutput)) +cinverse := *(*C.int)(unsafe.Pointer(&inverse)) +ccheckedSignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&checkedSignalSizesData[0])) +ccheckedSignalSizesLen := *(*C.int)(unsafe.Pointer(&checkedSignalSizesLen)) +cnormalization := *(*C.int64_t)(unsafe.Pointer(&normalization)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +coutputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizesData[0])) +coutputSizesLen := *(*C.int)(unsafe.Pointer(&outputSizesLen)) +C.atg__fft_with_size1(ptr, self, csignalNdim, ccomplexInput, ccomplexOutput, cinverse, ccheckedSignalSizesDataPtr, ccheckedSignalSizesLen, cnormalization, conesided, coutputSizesDataPtr, coutputSizesLen) +} +func Atg_FusedDropout(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg__fused_dropout(ptr, self, cp) +} +func Atg_GatherSparseBackward(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, grad Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__gather_sparse_backward(ptr, self, cdim, index, grad) +} +func Atg_GridSampler2dCpuFallback(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg__grid_sampler_2d_cpu_fallback(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func Atg_GridSampler2dCpuFallbackBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg__grid_sampler_2d_cpu_fallback_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, 
calignCorners) +} +func Atg_IndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__index_copy_(ptr, self, cdim, index, source) +} +func Atg_IndexPutImpl_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32, unsafety int32){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) +cunsafety := *(*C.int)(unsafe.Pointer(&unsafety)) +C.atg__index_put_impl_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate, cunsafety) +} +func Atg_Indices(ptr *Ctensor, self Ctensor){ +C.atg__indices(ptr, self) +} +func Atg_InverseHelper(ptr *Ctensor, self Ctensor){ +C.atg__inverse_helper(ptr, self) +} +func Atg_LogSoftmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) +C.atg__log_softmax(ptr, self, cdim, chalfToFloat) +} +func Atg_LogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__log_softmax_backward_data(ptr, gradOutput, output, cdim, self) +} +func Atg_Logcumsumexp(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__logcumsumexp(ptr, self, cdim) +} +func Atg_LogcumsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__logcumsumexp_out(ptr, out, self, cdim) +} +func Atg_LuSolveHelper(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ +C.atg__lu_solve_helper(ptr, self, lUData, lUPivots) +} +func Atg_LuWithInfo(ptr *Ctensor, self Ctensor, pivot int32, checkErrors int32){ +cpivot := *(*C.int)(unsafe.Pointer(&pivot)) +ccheckErrors := *(*C.int)(unsafe.Pointer(&checkErrors)) +C.atg__lu_with_info(ptr, self, cpivot, ccheckErrors) +} +func Atg_MakePerChannelQuantizedTensor(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +C.atg__make_per_channel_quantized_tensor(ptr, self, scale, zeroPoint, caxis) +} +func Atg_MakePerTensorQuantizedTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +C.atg__make_per_tensor_quantized_tensor(ptr, self, cscale, czeroPoint) +} +func Atg_MaskedScale(ptr *Ctensor, self Ctensor, mask Ctensor, scale float64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +C.atg__masked_scale(ptr, self, mask, cscale) +} +func Atg_MkldnnReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){ +cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) +cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) +C.atg__mkldnn_reshape(ptr, self, cshapeDataPtr, cshapeLen) +} +func Atg_MkldnnTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){ +cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +C.atg__mkldnn_transpose(ptr, self, cdim0, cdim1) +} +func Atg_MkldnnTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){ +cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +C.atg__mkldnn_transpose_(ptr, self, cdim0, cdim1) +} +func Atg_Mode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) 
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__mode(ptr, self, cdim, ckeepdim) +} +func Atg_ModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg__mode_out(ptr, values, indices, self, cdim, ckeepdim) +} +func Atg_MultinomialAliasDraw(ptr *Ctensor, j Ctensor, q Ctensor, numSamples int64){ +cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) +C.atg__multinomial_alias_draw(ptr, j, q, cnumSamples) +} +func Atg_MultinomialAliasSetup(ptr *Ctensor, probs Ctensor){ +C.atg__multinomial_alias_setup(ptr, probs) +} +func Atg_NnpackSpatialConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg__nnpack_spatial_convolution(ptr, input, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func Atg_NnpackSpatialConvolutionBackwardInput(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg__nnpack_spatial_convolution_backward_input(ptr, input, gradOutput, weight, cpaddingDataPtr, cpaddingLen) +} +func Atg_NnpackSpatialConvolutionBackwardWeight(ptr *Ctensor, input Ctensor, weightsizeData []int64, weightsizeLen int, gradOutput Ctensor, paddingData []int64, paddingLen int){ +cweightsizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightsizeData[0])) +cweightsizeLen := *(*C.int)(unsafe.Pointer(&weightsizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg__nnpack_spatial_convolution_backward_weight(ptr, input, cweightsizeDataPtr, cweightsizeLen, gradOutput, cpaddingDataPtr, cpaddingLen) +} +func Atg_PackPaddedSequence(ptr *Ctensor, input Ctensor, lengths Ctensor, batchFirst int32){ +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +C.atg__pack_padded_sequence(ptr, input, lengths, cbatchFirst) +} +func Atg_PackPaddedSequenceBackward(ptr *Ctensor, grad Ctensor, inputSizeData []int64, inputSizeLen int, batchSizes Ctensor, batchFirst int32){ +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +C.atg__pack_padded_sequence_backward(ptr, grad, cinputSizeDataPtr, cinputSizeLen, batchSizes, cbatchFirst) +} +func Atg_PadPackedSequence(ptr *Ctensor, data Ctensor, batchSizes Ctensor, batchFirst int32, paddingValue Cscalar, totalLength int64){ +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +ctotalLength := *(*C.int64_t)(unsafe.Pointer(&totalLength)) +C.atg__pad_packed_sequence(ptr, data, batchSizes, cbatchFirst, paddingValue , ctotalLength) +} +func Atg_PdistBackward(ptr *Ctensor, grad Ctensor, self Ctensor, p float64, pdist Ctensor){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg__pdist_backward(ptr, grad, self, cp, pdist) +} +func Atg_QrHelper(ptr *Ctensor, self Ctensor, some int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +C.atg__qr_helper(ptr, self, csome) +} +func Atg_RemoveBatchDim(ptr *Ctensor, self Ctensor, level 
int64, batchSize int64, outDim int64){ +clevel := *(*C.int64_t)(unsafe.Pointer(&level)) +cbatchSize := *(*C.int64_t)(unsafe.Pointer(&batchSize)) +coutDim := *(*C.int64_t)(unsafe.Pointer(&outDim)) +C.atg__remove_batch_dim(ptr, self, clevel, cbatchSize, coutDim) +} +func Atg_ReshapeFromTensor(ptr *Ctensor, self Ctensor, shape Ctensor){ +C.atg__reshape_from_tensor(ptr, self, shape) +} +func Atg_SWhere(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){ +C.atg__s_where(ptr, condition, self, other) +} +func Atg_SampleDirichlet(ptr *Ctensor, self Ctensor){ +C.atg__sample_dirichlet(ptr, self) +} +func Atg_SaturateWeightToFp16(ptr *Ctensor, weight Ctensor){ +C.atg__saturate_weight_to_fp16(ptr, weight) +} +func Atg_ShapeAsTensor(ptr *Ctensor, self Ctensor){ +C.atg__shape_as_tensor(ptr, self) +} +func Atg_SobolEngineDraw(ptr *Ctensor, quasi Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64, dtype int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sobol_engine_draw(ptr, quasi, cn, sobolstate, cdimension, cnumGenerated, cdtype) +} +func Atg_SobolEngineFf_(ptr *Ctensor, self Ctensor, n int64, sobolstate Ctensor, dimension int64, numGenerated int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +cnumGenerated := *(*C.int64_t)(unsafe.Pointer(&numGenerated)) +C.atg__sobol_engine_ff_(ptr, self, cn, sobolstate, cdimension, cnumGenerated) +} +func Atg_SobolEngineInitializeState_(ptr *Ctensor, self Ctensor, dimension int64){ +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +C.atg__sobol_engine_initialize_state_(ptr, self, cdimension) +} +func Atg_SobolEngineScramble_(ptr *Ctensor, self Ctensor, ltm Ctensor, dimension int64){ +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +C.atg__sobol_engine_scramble_(ptr, self, ltm, cdimension) +} +func Atg_Softmax(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) +C.atg__softmax(ptr, self, cdim, chalfToFloat) +} +func Atg_SoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__softmax_backward_data(ptr, gradOutput, output, cdim, self) +} +func Atg_SolveHelper(ptr *Ctensor, self Ctensor, a Ctensor){ +C.atg__solve_helper(ptr, self, a) +} +func Atg_SparseAddmm(ptr *Ctensor, self Ctensor, sparse Ctensor, dense Ctensor){ +C.atg__sparse_addmm(ptr, self, sparse, dense) +} +func Atg_SparseCooTensorUnsafe(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__sparse_coo_tensor_unsafe(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func Atg_SparseCooTensorWithDims(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := 
*(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__sparse_coo_tensor_with_dims(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func Atg_SparseCooTensorWithDimsAndTensors(ptr *Ctensor, sparseDim int64, denseDim int64, sizeData []int64, sizeLen int, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){ +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg__sparse_coo_tensor_with_dims_and_tensors(ptr, csparseDim, cdenseDim, csizeDataPtr, csizeLen, indices, values, coptionsKind, coptionsDevice) +} +func Atg_SparseLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sparse_log_softmax(ptr, self, cdim, cdtype) +} +func Atg_SparseLogSoftmax1(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) +C.atg__sparse_log_softmax1(ptr, self, cdim, chalfToFloat) +} +func Atg_SparseLogSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__sparse_log_softmax_backward_data(ptr, gradOutput, output, cdim, self) +} +func Atg_SparseMm(ptr *Ctensor, sparse Ctensor, dense Ctensor){ +C.atg__sparse_mm(ptr, sparse, dense) +} +func Atg_SparseSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sparse_softmax(ptr, self, cdim, cdtype) +} +func Atg_SparseSoftmax1(ptr *Ctensor, self Ctensor, dim int64, halfToFloat int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +chalfToFloat := *(*C.int)(unsafe.Pointer(&halfToFloat)) +C.atg__sparse_softmax1(ptr, self, cdim, chalfToFloat) +} +func Atg_SparseSoftmaxBackwardData(ptr *Ctensor, gradOutput Ctensor, output Ctensor, dim int64, self Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__sparse_softmax_backward_data(ptr, gradOutput, output, cdim, self) +} +func Atg_SparseSum(ptr *Ctensor, self Ctensor){ +C.atg__sparse_sum(ptr, self) +} +func Atg_SparseSum1(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sparse_sum1(ptr, self, cdtype) +} +func Atg_SparseSum2(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +C.atg__sparse_sum2(ptr, self, cdimDataPtr, cdimLen) +} +func Atg_SparseSum3(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg__sparse_sum3(ptr, self, cdimDataPtr, cdimLen, cdtype) +} +func Atg_SparseSumBackward(ptr *Ctensor, grad Ctensor, self Ctensor, dimData []int64, dimLen int){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +C.atg__sparse_sum_backward(ptr, grad, self, cdimDataPtr, cdimLen) +} +func 
Atg_StandardGamma(ptr *Ctensor, self Ctensor){ +C.atg__standard_gamma(ptr, self) +} +func Atg_StandardGammaGrad(ptr *Ctensor, self Ctensor, output Ctensor){ +C.atg__standard_gamma_grad(ptr, self, output) +} +func Atg_Std(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg__std(ptr, self, cunbiased) +} +func Atg_SvdHelper(ptr *Ctensor, self Ctensor, some int32, computeUv int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) +C.atg__svd_helper(ptr, self, csome, ccomputeUv) +} +func Atg_SymeigHelper(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg__symeig_helper(ptr, self, ceigenvectors, cupper) +} +func Atg_TestOptionalFilledIntlist(ptr *Ctensor, values Ctensor, addendsData []int64, addendsLen int){ +caddendsDataPtr := (*C.int64_t)(unsafe.Pointer(&addendsData[0])) +caddendsLen := *(*C.int)(unsafe.Pointer(&addendsLen)) +C.atg__test_optional_filled_intlist(ptr, values, caddendsDataPtr, caddendsLen) +} +func Atg_TestOptionalIntlist(ptr *Ctensor, values Ctensor, addendsData []int64, addendsLen int){ +caddendsDataPtr := (*C.int64_t)(unsafe.Pointer(&addendsData[0])) +caddendsLen := *(*C.int)(unsafe.Pointer(&addendsLen)) +C.atg__test_optional_intlist(ptr, values, caddendsDataPtr, caddendsLen) +} +func Atg_TestSerializationSubcmul(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg__test_serialization_subcmul(ptr, self, other) +} +func Atg_TriangularSolveHelper(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) +C.atg__triangular_solve_helper(ptr, self, a, cupper, ctranspose, cunitriangular) +} +func Atg_Trilinear(ptr *Ctensor, i1 Ctensor, i2 Ctensor, i3 Ctensor, expand1Data []int64, expand1Len int, expand2Data []int64, expand2Len int, expand3Data []int64, expand3Len int, sumdimData []int64, sumdimLen int, unrollDim int64){ +cexpand1DataPtr := (*C.int64_t)(unsafe.Pointer(&expand1Data[0])) +cexpand1Len := *(*C.int)(unsafe.Pointer(&expand1Len)) +cexpand2DataPtr := (*C.int64_t)(unsafe.Pointer(&expand2Data[0])) +cexpand2Len := *(*C.int)(unsafe.Pointer(&expand2Len)) +cexpand3DataPtr := (*C.int64_t)(unsafe.Pointer(&expand3Data[0])) +cexpand3Len := *(*C.int)(unsafe.Pointer(&expand3Len)) +csumdimDataPtr := (*C.int64_t)(unsafe.Pointer(&sumdimData[0])) +csumdimLen := *(*C.int)(unsafe.Pointer(&sumdimLen)) +cunrollDim := *(*C.int64_t)(unsafe.Pointer(&unrollDim)) +C.atg__trilinear(ptr, i1, i2, i3, cexpand1DataPtr, cexpand1Len, cexpand2DataPtr, cexpand2Len, cexpand3DataPtr, cexpand3Len, csumdimDataPtr, csumdimLen, cunrollDim) +} +func Atg_Unique(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32){ +csorted := *(*C.int)(unsafe.Pointer(&sorted)) +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +C.atg__unique(ptr, self, csorted, creturnInverse) +} +func Atg_Unique2(ptr *Ctensor, self Ctensor, sorted int32, returnInverse int32, returnCounts int32){ +csorted := *(*C.int)(unsafe.Pointer(&sorted)) +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +C.atg__unique2(ptr, self, csorted, creturnInverse, creturnCounts) +} +func Atg_UnsafeView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg__unsafe_view(ptr, self, csizeDataPtr, csizeLen) +} +func Atg_Values(ptr *Ctensor, self Ctensor){ +C.atg__values(ptr, self) +} +func Atg_Var(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg__var(ptr, self, cunbiased) +} +func Atg_WeightNorm(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm(ptr, v, g, cdim) +} +func Atg_WeightNormCudaInterface(ptr *Ctensor, v Ctensor, g Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm_cuda_interface(ptr, v, g, cdim) +} +func Atg_WeightNormCudaInterfaceBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm_cuda_interface_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) +} +func Atg_WeightNormDifferentiableBackward(ptr *Ctensor, gradW Ctensor, savedV Ctensor, savedG Ctensor, savedNorms Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg__weight_norm_differentiable_backward(ptr, gradW, savedV, savedG, savedNorms, cdim) +} +func AtgAbs(ptr *Ctensor, self Ctensor){ +C.atg_abs(ptr, self) +} +func AtgAbs_(ptr *Ctensor, self Ctensor){ +C.atg_abs_(ptr, self) +} +func AtgAbsOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_abs_out(ptr, out, self) +} +func AtgAbsolute(ptr *Ctensor, self Ctensor){ +C.atg_absolute(ptr, self) +} +func AtgAbsolute_(ptr *Ctensor, self Ctensor){ +C.atg_absolute_(ptr, self) +} +func AtgAbsoluteOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_absolute_out(ptr, out, self) +} +func AtgAcos(ptr *Ctensor, self Ctensor){ +C.atg_acos(ptr, self) +} +func AtgAcos_(ptr *Ctensor, self Ctensor){ +C.atg_acos_(ptr, self) } -func AtgAdaptiveMaxPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_adaptive_max_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAcosOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_acos_out(ptr, out, self) } -func AtgAdaptiveMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { - C.atg_adaptive_max_pool2d_backward(ptr, gradOutput, self, indices) +func AtgAcosh(ptr *Ctensor, self Ctensor){ +C.atg_acosh(ptr, self) } -func AtgAdaptiveMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { - C.atg_adaptive_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, indices) +func AtgAcosh_(ptr *Ctensor, self Ctensor){ +C.atg_acosh_(ptr, self) } -func AtgAdaptiveMaxPool2dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_adaptive_max_pool2d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAcoshOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_acosh_out(ptr, out, self) } -func AtgAdaptiveMaxPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_adaptive_max_pool3d(ptr, self, coutputSizeDataPtr, 
coutputSizeLen) +func AtgAdaptiveAvgPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { - C.atg_adaptive_max_pool3d_backward(ptr, gradOutput, self, indices) +func AtgAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor) { - C.atg_adaptive_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, indices) +func AtgAdaptiveAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdaptiveMaxPool3dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_adaptive_max_pool3d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) +func AtgAdaptiveAvgPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdd(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_add(ptr, self, other) +func AtgAdaptiveAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_adaptive_avg_pool3d_backward(ptr, gradOutput, self) } -func AtgAdd1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_add1(ptr, self, other) +func AtgAdaptiveAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_adaptive_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self) } -func AtgAdd_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_add_(ptr, self, other) +func AtgAdaptiveAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_avg_pool3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAdd1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_add_1(ptr, self, other) +func AtgAdaptiveMaxPool1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool1d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAddOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_add_out(ptr, out, self, other) +func AtgAdaptiveMaxPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ 
+coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { - C.atg_addbmm(ptr, self, batch1, batch2) +func AtgAdaptiveMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool2d_backward(ptr, gradOutput, self, indices) } -func AtgAddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { - C.atg_addbmm_(ptr, self, batch1, batch2) +func AtgAdaptiveMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, indices) } -func AtgAddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { - C.atg_addbmm_out(ptr, out, self, batch1, batch2) +func AtgAdaptiveMaxPool2dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool2d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAddcdiv(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { - C.atg_addcdiv(ptr, self, tensor1, tensor2) +func AtgAdaptiveMaxPool3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool3d(ptr, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAddcdiv_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { - C.atg_addcdiv_(ptr, self, tensor1, tensor2) +func AtgAdaptiveMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool3d_backward(ptr, gradOutput, self, indices) } -func AtgAddcdivOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { - C.atg_addcdiv_out(ptr, out, self, tensor1, tensor2) +func AtgAdaptiveMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor){ +C.atg_adaptive_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, indices) } -func AtgAddcmul(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { - C.atg_addcmul(ptr, self, tensor1, tensor2) +func AtgAdaptiveMaxPool3dOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_adaptive_max_pool3d_out(ptr, out, indices, self, coutputSizeDataPtr, coutputSizeLen) } -func AtgAddcmul_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { - C.atg_addcmul_(ptr, self, tensor1, tensor2) +func AtgAdd(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_add(ptr, self, other) } -func AtgAddcmulOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor) { - C.atg_addcmul_out(ptr, out, self, tensor1, tensor2) +func AtgAdd1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_add1(ptr, self, other ) } -func AtgAddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { - C.atg_addmm(ptr, self, mat1, mat2) +func AtgAdd_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_add_(ptr, 
self, other) } -func AtgAddmm_(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { - C.atg_addmm_(ptr, self, mat1, mat2) +func AtgAdd1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_add_1(ptr, self, other ) } -func AtgAddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { - C.atg_addmm_out(ptr, out, self, mat1, mat2) +func AtgAddOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_add_out(ptr, out, self, other) } -func AtgAddmv(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor) { - C.atg_addmv(ptr, self, mat, vec) +func AtgAddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_addbmm(ptr, self, batch1, batch2) } -func AtgAddmv_(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor) { - C.atg_addmv_(ptr, self, mat, vec) +func AtgAddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_addbmm_(ptr, self, batch1, batch2) } -func AtgAddmvOut(ptr *Ctensor, out Ctensor, self Ctensor, mat Ctensor, vec Ctensor) { - C.atg_addmv_out(ptr, out, self, mat, vec) +func AtgAddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_addbmm_out(ptr, out, self, batch1, batch2) } -func AtgAddr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { - C.atg_addr(ptr, self, vec1, vec2) +func AtgAddcdiv(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcdiv(ptr, self, tensor1, tensor2) } -func AtgAddr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { - C.atg_addr_(ptr, self, vec1, vec2) +func AtgAddcdiv_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcdiv_(ptr, self, tensor1, tensor2) } -func AtgAddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor) { - C.atg_addr_out(ptr, out, self, vec1, vec2) +func AtgAddcdivOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcdiv_out(ptr, out, self, tensor1, tensor2) } -func AtgAffineGridGenerator(ptr *Ctensor, theta Ctensor, sizeData []int64, sizeLen int, alignCorners int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_affine_grid_generator(ptr, theta, csizeDataPtr, csizeLen, calignCorners) +func AtgAddcmul(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcmul(ptr, self, tensor1, tensor2) } -func AtgAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, sizeData []int64, sizeLen int, alignCorners int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_affine_grid_generator_backward(ptr, grad, csizeDataPtr, csizeLen, calignCorners) +func AtgAddcmul_(ptr *Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcmul_(ptr, self, tensor1, tensor2) } -func AtgAlias(ptr *Ctensor, self Ctensor) { - C.atg_alias(ptr, self) +func AtgAddcmulOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor1 Ctensor, tensor2 Ctensor){ +C.atg_addcmul_out(ptr, out, self, tensor1, tensor2) } -func AtgAlignAs(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_align_as(ptr, self, other) +func AtgAddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_addmm(ptr, self, mat1, mat2) +} +func AtgAddmm_(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_addmm_(ptr, self, mat1, mat2) +} +func AtgAddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, 
mat1 Ctensor, mat2 Ctensor){ +C.atg_addmm_out(ptr, out, self, mat1, mat2) +} +func AtgAddmv(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ +C.atg_addmv(ptr, self, mat, vec) +} +func AtgAddmv_(ptr *Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ +C.atg_addmv_(ptr, self, mat, vec) +} +func AtgAddmvOut(ptr *Ctensor, out Ctensor, self Ctensor, mat Ctensor, vec Ctensor){ +C.atg_addmv_out(ptr, out, self, mat, vec) +} +func AtgAddr(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg_addr(ptr, self, vec1, vec2) +} +func AtgAddr_(ptr *Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg_addr_(ptr, self, vec1, vec2) +} +func AtgAddrOut(ptr *Ctensor, out Ctensor, self Ctensor, vec1 Ctensor, vec2 Ctensor){ +C.atg_addr_out(ptr, out, self, vec1, vec2) +} +func AtgAffineGridGenerator(ptr *Ctensor, theta Ctensor, sizeData []int64, sizeLen int, alignCorners int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_affine_grid_generator(ptr, theta, csizeDataPtr, csizeLen, calignCorners) +} +func AtgAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, sizeData []int64, sizeLen int, alignCorners int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_affine_grid_generator_backward(ptr, grad, csizeDataPtr, csizeLen, calignCorners) +} +func AtgAlias(ptr *Ctensor, self Ctensor){ +C.atg_alias(ptr, self) +} +func AtgAlignAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_align_as(ptr, self, other) } -func AtgAll(ptr *Ctensor, self Ctensor) { - C.atg_all(ptr, self) -} -func AtgAll1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_all1(ptr, self, cdim, ckeepdim) -} -func AtgAllOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_all_out(ptr, out, self, cdim, ckeepdim) -} -func AtgAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_alpha_dropout(ptr, input, cp, ctrain) -} -func AtgAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_alpha_dropout_(ptr, self, cp, ctrain) -} -func AtgAngle(ptr *Ctensor, self Ctensor) { - C.atg_angle(ptr, self) -} -func AtgAngleOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_angle_out(ptr, out, self) -} -func AtgAny(ptr *Ctensor, self Ctensor) { - C.atg_any(ptr, self) -} -func AtgAny1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_any1(ptr, self, cdim, ckeepdim) -} -func AtgAnyOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_any_out(ptr, out, self, cdim, ckeepdim) -} -func AtgArange(ptr *Ctensor, end Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_arange(ptr, end, coptionsKind, 
coptionsDevice) -} -func AtgArange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_arange1(ptr, start, end, coptionsKind, coptionsDevice) -} -func AtgArange2(ptr *Ctensor, start Cscalar, end Cscalar, step Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_arange2(ptr, start, end, step, coptionsKind, coptionsDevice) -} -func AtgArangeOut(ptr *Ctensor, out Ctensor, end Cscalar) { - C.atg_arange_out(ptr, out, end) -} -func AtgArangeOut1(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar) { - C.atg_arange_out1(ptr, out, start, end) -} -func AtgArgmax(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_argmax(ptr, self, cdim, ckeepdim) -} -func AtgArgmin(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_argmin(ptr, self, cdim, ckeepdim) -} -func AtgArgsort(ptr *Ctensor, self Ctensor, dim int64, descending int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdescending := *(*C.int)(unsafe.Pointer(&descending)) - C.atg_argsort(ptr, self, cdim, cdescending) -} -func AtgAsStrided(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset)) - C.atg_as_strided(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) -} -func AtgAsStrided_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffset int64) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cstorageOffset := *(*C.int64_t)(unsafe.Pointer(&storageOffset)) - C.atg_as_strided_(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffset) -} -func AtgAsin(ptr *Ctensor, self Ctensor) { - C.atg_asin(ptr, self) -} -func AtgAsin_(ptr *Ctensor, self Ctensor) { - C.atg_asin_(ptr, self) -} -func AtgAsinOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_asin_out(ptr, out, self) -} -func AtgAtan(ptr *Ctensor, self Ctensor) { - C.atg_atan(ptr, self) -} -func AtgAtan2(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_atan2(ptr, self, other) -} -func AtgAtan2_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_atan2_(ptr, self, other) -} -func AtgAtan2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_atan2_out(ptr, out, self, other) -} -func AtgAtan_(ptr *Ctensor, self Ctensor) { - C.atg_atan_(ptr, self) -} -func AtgAtanOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_atan_out(ptr, out, self) -} -func AtgAvgPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32) { - 
ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - C.atg_avg_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad) -} -func AtgAvgPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, 
ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool2d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverride int64) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) - cdivisorOverride := *(*C.int64_t)(unsafe.Pointer(&divisorOverride)) - C.atg_avg_pool3d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverride) -} -func AtgBaddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { - C.atg_baddbmm(ptr, self, batch1, batch2) -} -func AtgBaddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { - C.atg_baddbmm_(ptr, self, batch1, batch2) -} -func AtgBaddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor) { - C.atg_baddbmm_out(ptr, out, self, batch1, batch2) -} -func AtgBartlettWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_bartlett_window(ptr, cwindowLength, coptionsKind, coptionsDevice) -} -func AtgBartlettWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_bartlett_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) -} -func AtgBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64, cudnnEnabled int32) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) - C.atg_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps, ccudnnEnabled) -} -func AtgBatchNormBackwardElemt(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, meanDy Ctensor, meanDyXmu Ctensor) { - C.atg_batch_norm_backward_elemt(ptr, gradOut, input, mean, 
invstd, weight, meanDy, meanDyXmu) -} -func AtgBatchNormBackwardReduce(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, inputG int32, weightG int32, biasG int32) { - cinputG := *(*C.int)(unsafe.Pointer(&inputG)) - cweightG := *(*C.int)(unsafe.Pointer(&weightG)) - cbiasG := *(*C.int)(unsafe.Pointer(&biasG)) - C.atg_batch_norm_backward_reduce(ptr, gradOut, input, mean, invstd, weight, cinputG, cweightG, cbiasG) -} -func AtgBatchNormElemt(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64) { - ceps := *(*C.double)(unsafe.Pointer(&eps)) - C.atg_batch_norm_elemt(ptr, input, weight, bias, mean, invstd, ceps) -} -func AtgBatchNormElemtOut(ptr *Ctensor, out Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64) { - ceps := *(*C.double)(unsafe.Pointer(&eps)) - C.atg_batch_norm_elemt_out(ptr, out, input, weight, bias, mean, invstd, ceps) -} -func AtgBatchNormGatherStats(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, count int64) { - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccount := *(*C.int64_t)(unsafe.Pointer(&count)) - C.atg_batch_norm_gather_stats(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccount) -} -func AtgBatchNormGatherStatsWithCounts(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, countsData []int64, countsLen int) { - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccountsDataPtr := (*C.int64_t)(unsafe.Pointer(&countsData[0])) - ccountsLen := *(*C.int)(unsafe.Pointer(&countsLen)) - C.atg_batch_norm_gather_stats_with_counts(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccountsDataPtr, ccountsLen) -} -func AtgBatchNormStats(ptr *Ctensor, input Ctensor, eps float64) { - ceps := *(*C.double)(unsafe.Pointer(&eps)) - C.atg_batch_norm_stats(ptr, input, ceps) -} -func AtgBatchNormUpdateStats(ptr *Ctensor, input Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64) { - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - C.atg_batch_norm_update_stats(ptr, input, runningMean, runningVar, cmomentum) -} -func AtgBernoulli(ptr *Ctensor, self Ctensor) { - C.atg_bernoulli(ptr, self) -} -func AtgBernoulli1(ptr *Ctensor, self Ctensor, p float64) { - cp := *(*C.double)(unsafe.Pointer(&p)) - C.atg_bernoulli1(ptr, self, cp) -} -func AtgBernoulli_(ptr *Ctensor, self Ctensor, p Ctensor) { - C.atg_bernoulli_(ptr, self, p) -} -func AtgBernoulli1_(ptr *Ctensor, self Ctensor, p float64) { - cp := *(*C.double)(unsafe.Pointer(&p)) - C.atg_bernoulli_1(ptr, self, cp) +func AtgAll(ptr *Ctensor, self Ctensor){ +C.atg_all(ptr, self) +} +func AtgAll1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_all1(ptr, self, cdim, ckeepdim) +} +func AtgAllOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_all_out(ptr, out, self, cdim, ckeepdim) +} +func AtgAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_alpha_dropout(ptr, input, cp, 
ctrain) } -func AtgBernoulliOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_bernoulli_out(ptr, out, self) +func AtgAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_alpha_dropout_(ptr, self, cp, ctrain) } -func AtgBilinear(ptr *Ctensor, input1 Ctensor, input2 Ctensor, weight Ctensor, bias Ctensor) { - C.atg_bilinear(ptr, input1, input2, weight, bias) +func AtgAmax(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_amax(ptr, self, cdimDataPtr, cdimLen, ckeepdim) } -func AtgBinaryCrossEntropy(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_binary_cross_entropy(ptr, self, target, weight, creduction) +func AtgAmaxOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_amax_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) } -func AtgBinaryCrossEntropyBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_binary_cross_entropy_backward(ptr, gradOutput, self, target, weight, creduction) +func AtgAmin(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_amin(ptr, self, cdimDataPtr, cdimLen, ckeepdim) } -func AtgBinaryCrossEntropyBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_binary_cross_entropy_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction) +func AtgAminOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_amin_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) } -func AtgBinaryCrossEntropyOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_binary_cross_entropy_out(ptr, out, self, target, weight, creduction) +func AtgAngle(ptr *Ctensor, self Ctensor){ +C.atg_angle(ptr, self) } -func AtgBinaryCrossEntropyWithLogits(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_binary_cross_entropy_with_logits(ptr, self, target, weight, posWeight, creduction) +func AtgAngleOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_angle_out(ptr, out, self) } -func AtgBinaryCrossEntropyWithLogitsBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_binary_cross_entropy_with_logits_backward(ptr, gradOutput, self, 
target, weight, posWeight, creduction) +func AtgAny(ptr *Ctensor, self Ctensor){ +C.atg_any(ptr, self) } -func AtgBincount(ptr *Ctensor, self Ctensor, weights Ctensor, minlength int64) { - cminlength := *(*C.int64_t)(unsafe.Pointer(&minlength)) - C.atg_bincount(ptr, self, weights, cminlength) +func AtgAny1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_any1(ptr, self, cdim, ckeepdim) } -func AtgBitwiseAnd(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_and(ptr, self, other) +func AtgAnyOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_any_out(ptr, out, self, cdim, ckeepdim) +} +func AtgArange(ptr *Ctensor, end Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_arange(ptr, end , coptionsKind, coptionsDevice) +} +func AtgArange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_arange1(ptr, start , end , coptionsKind, coptionsDevice) +} +func AtgArange2(ptr *Ctensor, start Cscalar, end Cscalar, step Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_arange2(ptr, start , end , step , coptionsKind, coptionsDevice) +} +func AtgArangeOut(ptr *Ctensor, out Ctensor, end Cscalar){ +C.atg_arange_out(ptr, out, end ) +} +func AtgArangeOut1(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){ +C.atg_arange_out1(ptr, out, start , end ) +} +func AtgArccos(ptr *Ctensor, self Ctensor){ +C.atg_arccos(ptr, self) +} +func AtgArccos_(ptr *Ctensor, self Ctensor){ +C.atg_arccos_(ptr, self) +} +func AtgArccosOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_arccos_out(ptr, out, self) +} +func AtgArccosh(ptr *Ctensor, self Ctensor){ +C.atg_arccosh(ptr, self) } -func AtgBitwiseAnd1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_and1(ptr, self, other) +func AtgArccosh_(ptr *Ctensor, self Ctensor){ +C.atg_arccosh_(ptr, self) } -func AtgBitwiseAnd_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_and_(ptr, self, other) +func AtgArccoshOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_arccosh_out(ptr, out, self) } -func AtgBitwiseAnd1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_and_1(ptr, self, other) +func AtgArcsin(ptr *Ctensor, self Ctensor){ +C.atg_arcsin(ptr, self) } -func AtgBitwiseAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_and_out(ptr, out, self, other) +func AtgArcsin_(ptr *Ctensor, self Ctensor){ +C.atg_arcsin_(ptr, self) } -func AtgBitwiseAndOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_and_out1(ptr, out, self, other) +func AtgArcsinOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_arcsin_out(ptr, out, self) } -func AtgBitwiseNot(ptr *Ctensor, self Ctensor) { - C.atg_bitwise_not(ptr, self) +func AtgArcsinh(ptr *Ctensor, self Ctensor){ +C.atg_arcsinh(ptr, self) } -func AtgBitwiseNot_(ptr *Ctensor, self Ctensor) { - C.atg_bitwise_not_(ptr, self) +func AtgArcsinh_(ptr *Ctensor, self Ctensor){ +C.atg_arcsinh_(ptr, self) } -func AtgBitwiseNotOut(ptr 
*Ctensor, out Ctensor, self Ctensor) { - C.atg_bitwise_not_out(ptr, out, self) +func AtgArcsinhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_arcsinh_out(ptr, out, self) } -func AtgBitwiseOr(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_or(ptr, self, other) +func AtgArctan(ptr *Ctensor, self Ctensor){ +C.atg_arctan(ptr, self) } -func AtgBitwiseOr1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_or1(ptr, self, other) +func AtgArctan_(ptr *Ctensor, self Ctensor){ +C.atg_arctan_(ptr, self) } -func AtgBitwiseOr_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_or_(ptr, self, other) +func AtgArctanOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_arctan_out(ptr, out, self) } -func AtgBitwiseOr1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_or_1(ptr, self, other) +func AtgArctanh(ptr *Ctensor, self Ctensor){ +C.atg_arctanh(ptr, self) } -func AtgBitwiseOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_or_out(ptr, out, self, other) +func AtgArctanh_(ptr *Ctensor, self Ctensor){ +C.atg_arctanh_(ptr, self) } -func AtgBitwiseOrOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_or_out1(ptr, out, self, other) +func AtgArctanhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_arctanh_out(ptr, out, self) } -func AtgBitwiseXor(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_xor(ptr, self, other) +func AtgArgmax(ptr *Ctensor, self Ctensor, dimVal int64, dimNull int, keepdim int32){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_argmax(ptr, self, cdimVal, cdimNull, ckeepdim) } -func AtgBitwiseXor1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_xor1(ptr, self, other) +func AtgArgmin(ptr *Ctensor, self Ctensor, dimVal int64, dimNull int, keepdim int32){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_argmin(ptr, self, cdimVal, cdimNull, ckeepdim) } -func AtgBitwiseXor_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_xor_(ptr, self, other) +func AtgArgsort(ptr *Ctensor, self Ctensor, dim int64, descending int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdescending := *(*C.int)(unsafe.Pointer(&descending)) +C.atg_argsort(ptr, self, cdim, cdescending) } -func AtgBitwiseXor1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_xor_1(ptr, self, other) +func AtgAsStrided(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffsetVal int64, storageOffsetNull int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cstorageOffsetVal := *(*C.int64_t)(unsafe.Pointer(&storageOffsetVal)) +cstorageOffsetNull := *(*C.uint8_t)(unsafe.Pointer(&storageOffsetNull)) +C.atg_as_strided(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffsetVal, cstorageOffsetNull) } -func AtgBitwiseXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_bitwise_xor_out(ptr, out, self, other) +func AtgAsStrided_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, storageOffsetVal int64, storageOffsetNull int){ +csizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cstorageOffsetVal := *(*C.int64_t)(unsafe.Pointer(&storageOffsetVal)) +cstorageOffsetNull := *(*C.uint8_t)(unsafe.Pointer(&storageOffsetNull)) +C.atg_as_strided_(ptr, self, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, cstorageOffsetVal, cstorageOffsetNull) } -func AtgBitwiseXorOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_bitwise_xor_out1(ptr, out, self, other) +func AtgAsin(ptr *Ctensor, self Ctensor){ +C.atg_asin(ptr, self) } -func AtgBlackmanWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_blackman_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +func AtgAsin_(ptr *Ctensor, self Ctensor){ +C.atg_asin_(ptr, self) } -func AtgBlackmanWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_blackman_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +func AtgAsinOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_asin_out(ptr, out, self) } -func AtgBmm(ptr *Ctensor, self Ctensor, mat2 Ctensor) { - C.atg_bmm(ptr, self, mat2) +func AtgAsinh(ptr *Ctensor, self Ctensor){ +C.atg_asinh(ptr, self) } -func AtgBmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor) { - C.atg_bmm_out(ptr, out, self, mat2) +func AtgAsinh_(ptr *Ctensor, self Ctensor){ +C.atg_asinh_(ptr, self) +} +func AtgAsinhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_asinh_out(ptr, out, self) +} +func AtgAtan(ptr *Ctensor, self Ctensor){ +C.atg_atan(ptr, self) +} +func AtgAtan2(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_atan2(ptr, self, other) +} +func AtgAtan2_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_atan2_(ptr, self, other) +} +func AtgAtan2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_atan2_out(ptr, out, self, other) +} +func AtgAtan_(ptr *Ctensor, self Ctensor){ +C.atg_atan_(ptr, self) +} +func AtgAtanOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_atan_out(ptr, out, self) +} +func AtgAtanh(ptr *Ctensor, self Ctensor){ +C.atg_atanh(ptr, self) +} +func AtgAtanh_(ptr *Ctensor, self Ctensor){ +C.atg_atanh_(ptr, self) +} +func AtgAtanhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_atanh_out(ptr, out, self) +} +func AtgAtleast1d(ptr *Ctensor, self Ctensor){ +C.atg_atleast_1d(ptr, self) } -func AtgCartesianProd(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int) { - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - C.atg_cartesian_prod(ptr, ctensorsDataPtr, ctensorsLen) -} -func AtgCat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) -} -func AtgCatOut(ptr *Ctensor, out Ctensor, tensorsData 
[]Ctensor, tensorsLen int, dim int64) { - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) -} -func AtgCauchy_(ptr *Ctensor, self Ctensor, median float64, sigma float64) { - cmedian := *(*C.double)(unsafe.Pointer(&median)) - csigma := *(*C.double)(unsafe.Pointer(&sigma)) - C.atg_cauchy_(ptr, self, cmedian, csigma) -} -func AtgCdist(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, computeMode int64) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ccomputeMode := *(*C.int64_t)(unsafe.Pointer(&computeMode)) - C.atg_cdist(ptr, x1, x2, cp, ccomputeMode) -} -func AtgCeil(ptr *Ctensor, self Ctensor) { - C.atg_ceil(ptr, self) -} -func AtgCeil_(ptr *Ctensor, self Ctensor) { - C.atg_ceil_(ptr, self) -} -func AtgCeilOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_ceil_out(ptr, out, self) -} -func AtgCelu(ptr *Ctensor, self Ctensor) { - C.atg_celu(ptr, self) -} -func AtgCelu_(ptr *Ctensor, self Ctensor) { - C.atg_celu_(ptr, self) -} -func AtgChainMatmul(ptr *Ctensor, matricesData []Ctensor, matricesLen int) { - cmatricesDataPtr := (*Ctensor)(unsafe.Pointer(&matricesData[0])) - cmatricesLen := *(*C.int)(unsafe.Pointer(&matricesLen)) - C.atg_chain_matmul(ptr, cmatricesDataPtr, cmatricesLen) -} -func AtgCholesky(ptr *Ctensor, self Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_cholesky(ptr, self, cupper) -} -func AtgCholeskyInverse(ptr *Ctensor, self Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_cholesky_inverse(ptr, self, cupper) -} -func AtgCholeskyInverseOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_cholesky_inverse_out(ptr, out, self, cupper) -} -func AtgCholeskyOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_cholesky_out(ptr, out, self, cupper) -} -func AtgCholeskySolve(ptr *Ctensor, self Ctensor, input2 Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_cholesky_solve(ptr, self, input2, cupper) -} -func AtgCholeskySolveOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, upper int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_cholesky_solve_out(ptr, out, self, input2, cupper) +func AtgAtleast2d(ptr *Ctensor, self Ctensor){ +C.atg_atleast_2d(ptr, self) } -func AtgClamp(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar) { - C.atg_clamp(ptr, self, min, max) -} -func AtgClamp_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar) { - C.atg_clamp_(ptr, self, min, max) -} -func AtgClampMax(ptr *Ctensor, self Ctensor, max Cscalar) { - C.atg_clamp_max(ptr, self, max) -} -func AtgClampMax_(ptr *Ctensor, self Ctensor, max Cscalar) { - C.atg_clamp_max_(ptr, self, max) -} -func AtgClampMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, max Cscalar) { - C.atg_clamp_max_out(ptr, out, self, max) -} -func AtgClampMin(ptr *Ctensor, self Ctensor, min Cscalar) { - C.atg_clamp_min(ptr, self, min) -} -func AtgClampMin_(ptr *Ctensor, self Ctensor, min Cscalar) { - C.atg_clamp_min_(ptr, self, min) -} -func AtgClampMinOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar) { - C.atg_clamp_min_out(ptr, out, self, min) -} -func AtgClampOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar, max Cscalar) { - C.atg_clamp_out(ptr, out, self, min, max) -} -func AtgCoalesce(ptr *Ctensor, self Ctensor) { - 
C.atg_coalesce(ptr, self) -} -func AtgCol2im(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_col2im(ptr, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCol2imBackward(ptr *Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_col2im_backward(ptr, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCol2imBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_col2im_backward_out(ptr, gradInput, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCol2imOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := 
*(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_col2im_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgCombinations(ptr *Ctensor, self Ctensor, r int64, withReplacement int32) { - cr := *(*C.int64_t)(unsafe.Pointer(&r)) - cwithReplacement := *(*C.int)(unsafe.Pointer(&withReplacement)) - C.atg_combinations(ptr, self, cr, cwithReplacement) -} -func AtgConj(ptr *Ctensor, self Ctensor) { - C.atg_conj(ptr, self) -} -func AtgConjOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_conj_out(ptr, out, self) -} -func AtgConstantPadNd(ptr *Ctensor, self Ctensor, padData []int64, padLen int) { - cpadDataPtr := (*C.int64_t)(unsafe.Pointer(&padData[0])) - cpadLen := *(*C.int)(unsafe.Pointer(&padLen)) - C.atg_constant_pad_nd(ptr, self, cpadDataPtr, cpadLen) -} -func AtgContiguous(ptr *Ctensor, self Ctensor) { - C.atg_contiguous(ptr, self) -} -func AtgConv1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - C.atg_conv1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgConv2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - C.atg_conv2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgConv3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - C.atg_conv3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgConvTbc(ptr 
*Ctensor, self Ctensor, weight Ctensor, bias Ctensor, pad int64) { - cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) - C.atg_conv_tbc(ptr, self, weight, bias, cpad) -} -func AtgConvTbcBackward(ptr *Ctensor, self Ctensor, input Ctensor, weight Ctensor, bias Ctensor, pad int64) { - cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) - C.atg_conv_tbc_backward(ptr, self, input, weight, bias, cpad) -} -func AtgConvTranspose1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_conv_transpose1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} -func AtgConvTranspose2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_conv_transpose2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} -func AtgConvTranspose3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_conv_transpose3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) -} -func AtgConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData 
[]int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - C.atg_convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) -} -func AtgConvolutionOverrideable(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64) { - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - C.atg_convolution_overrideable(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) -} -func AtgCopySparseToSparse_(ptr *Ctensor, self Ctensor, src Ctensor, nonBlocking int32) { - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - C.atg_copy_sparse_to_sparse_(ptr, self, src, cnonBlocking) -} -func AtgCos(ptr *Ctensor, self Ctensor) { - C.atg_cos(ptr, self) -} -func AtgCos_(ptr *Ctensor, self Ctensor) { - C.atg_cos_(ptr, self) -} -func AtgCosOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_cos_out(ptr, out, self) -} -func AtgCosh(ptr *Ctensor, self Ctensor) { - C.atg_cosh(ptr, self) -} -func AtgCosh_(ptr *Ctensor, self Ctensor) { - C.atg_cosh_(ptr, self) -} -func AtgCoshOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_cosh_out(ptr, out, self) -} -func AtgCosineEmbeddingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64) { - cmargin := *(*C.double)(unsafe.Pointer(&margin)) - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_cosine_embedding_loss(ptr, input1, input2, target, cmargin, creduction) -} -func AtgCosineSimilarity(ptr *Ctensor, x1 Ctensor, x2 Ctensor, dim int64, eps float64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - C.atg_cosine_similarity(ptr, x1, x2, cdim, ceps) -} -func AtgCross(ptr *Ctensor, self Ctensor, other Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cross(ptr, self, other, cdim) -} -func 
AtgCrossOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cross_out(ptr, out, self, other, cdim) -} -func AtgCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, reduction int64, zeroInfinity int32) { - cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) - cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) - ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) - ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) - cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) - C.atg_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, creduction, czeroInfinity) -} -func AtgCtcLoss1(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengths Ctensor, targetLengths Ctensor, blank int64, reduction int64, zeroInfinity int32) { - cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) - C.atg_ctc_loss1(ptr, logProbs, targets, inputLengths, targetLengths, cblank, creduction, czeroInfinity) -} -func AtgCudnnAffineGridGenerator(ptr *Ctensor, theta Ctensor, n int64, c int64, h int64, w int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - cc := *(*C.int64_t)(unsafe.Pointer(&c)) - ch := *(*C.int64_t)(unsafe.Pointer(&h)) - cw := *(*C.int64_t)(unsafe.Pointer(&w)) - C.atg_cudnn_affine_grid_generator(ptr, theta, cn, cc, ch, cw) -} -func AtgCudnnAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, n int64, c int64, h int64, w int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - cc := *(*C.int64_t)(unsafe.Pointer(&c)) - ch := *(*C.int64_t)(unsafe.Pointer(&h)) - cw := *(*C.int64_t)(unsafe.Pointer(&w)) - C.atg_cudnn_affine_grid_generator_backward(ptr, grad, cn, cc, ch, cw) -} -func AtgCudnnBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) - cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) - C.atg_cudnn_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) -} -func AtgCudnnBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64, reserveSpace Ctensor) { - cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) - C.atg_cudnn_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon, reserveSpace) -} -func AtgCudnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolution1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) - cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) - cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := 
*(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_transpose(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTranspose1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_transpose1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - 
C.atg_cudnn_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) - cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_cudnn_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor) { - C.atg_cudnn_grid_sampler(ptr, self, grid) -} -func AtgCudnnGridSamplerBackward(ptr *Ctensor, self Ctensor, grid Ctensor, gradOutput Ctensor) { - C.atg_cudnn_grid_sampler_backward(ptr, self, grid, gradOutput) -} -func AtgCummax(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cummax(ptr, self, cdim) -} -func AtgCummaxOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cummax_out(ptr, values, indices, self, cdim) -} -func AtgCummin(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cummin(ptr, self, cdim) -} -func AtgCumminOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_cummin_out(ptr, values, indices, self, cdim) -} -func AtgCumprod(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_cumprod(ptr, self, cdim, cdtype) -} -func AtgCumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_cumprod_out(ptr, out, self, cdim, cdtype) -} -func AtgCumsum(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_cumsum(ptr, self, cdim, cdtype) -} -func AtgCumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_cumsum_out(ptr, out, self, cdim, cdtype) -} -func AtgData(ptr *Ctensor, self Ctensor) { - C.atg_data(ptr, self) -} -func AtgDequantize(ptr *Ctensor, self Ctensor) { - C.atg_dequantize(ptr, self) -} -func AtgDet(ptr *Ctensor, self Ctensor) { - C.atg_det(ptr, self) -} -func AtgDetach(ptr *Ctensor, self Ctensor) { - C.atg_detach(ptr, self) -} -func 
AtgDetach_(ptr *Ctensor, self Ctensor) { - C.atg_detach_(ptr, self) -} -func AtgDiag(ptr *Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_diag(ptr, self, cdiagonal) -} -func AtgDiagEmbed(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64) { - coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) - cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) - cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) - C.atg_diag_embed(ptr, self, coffset, cdim1, cdim2) -} -func AtgDiagOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_diag_out(ptr, out, self, cdiagonal) -} -func AtgDiagflat(ptr *Ctensor, self Ctensor, offset int64) { - coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) - C.atg_diagflat(ptr, self, coffset) -} -func AtgDiagonal(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64) { - coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) - cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) - cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) - C.atg_diagonal(ptr, self, coffset, cdim1, cdim2) -} -func AtgDigamma(ptr *Ctensor, self Ctensor) { - C.atg_digamma(ptr, self) -} -func AtgDigamma_(ptr *Ctensor, self Ctensor) { - C.atg_digamma_(ptr, self) -} -func AtgDigammaOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_digamma_out(ptr, out, self) -} -func AtgDist(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_dist(ptr, self, other) -} -func AtgDiv(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_div(ptr, self, other) -} -func AtgDiv1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_div1(ptr, self, other) -} -func AtgDiv_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_div_(ptr, self, other) -} -func AtgDiv1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_div_1(ptr, self, other) -} -func AtgDivOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_div_out(ptr, out, self, other) -} -func AtgDot(ptr *Ctensor, self Ctensor, tensor Ctensor) { - C.atg_dot(ptr, self, tensor) -} -func AtgDotOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor Ctensor) { - C.atg_dot_out(ptr, out, self, tensor) -} -func AtgDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_dropout(ptr, input, cp, ctrain) -} -func AtgDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_dropout_(ptr, self, cp, ctrain) -} -func AtgEig(ptr *Ctensor, self Ctensor, eigenvectors int32) { - ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) - C.atg_eig(ptr, self, ceigenvectors) -} -func AtgEigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32) { - ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) - C.atg_eig_out(ptr, e, v, self, ceigenvectors) -} -func AtgEinsum(ptr *Ctensor, equation string, tensorsData []Ctensor, tensorsLen int) { - cequation := C.CString(equation) - equationLen := len(equation) - cequationLen := *(*C.int)(unsafe.Pointer(&equationLen)) - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - C.atg_einsum(ptr, cequation, cequationLen, ctensorsDataPtr, ctensorsLen) -} -func AtgElu(ptr *Ctensor, self Ctensor) { - C.atg_elu(ptr, self) -} -func AtgElu_(ptr *Ctensor, self Ctensor) { - C.atg_elu_(ptr, self) -} -func AtgEluBackward(ptr *Ctensor, gradOutput 
Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor) { - C.atg_elu_backward(ptr, gradOutput, alpha, scale, inputScale, output) -} -func AtgEluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor) { - C.atg_elu_backward_out(ptr, gradInput, gradOutput, alpha, scale, inputScale, output) -} -func AtgEluOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_elu_out(ptr, out, self) -} -func AtgEmbedding(ptr *Ctensor, weight Ctensor, indices Ctensor, paddingIdx int64, scaleGradByFreq int32, sparse int32) { - cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - csparse := *(*C.int)(unsafe.Pointer(&sparse)) - C.atg_embedding(ptr, weight, indices, cpaddingIdx, cscaleGradByFreq, csparse) -} -func AtgEmbeddingBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32, sparse int32) { - cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) - cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - csparse := *(*C.int)(unsafe.Pointer(&sparse)) - C.atg_embedding_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq, csparse) -} -func AtgEmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32) { - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - csparse := *(*C.int)(unsafe.Pointer(&sparse)) - cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) - C.atg_embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) -} -func AtgEmbeddingDenseBackward(ptr *Ctensor, gradOutput Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32) { - cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) - cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - C.atg_embedding_dense_backward(ptr, gradOutput, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) -} -func AtgEmbeddingRenorm_(ptr *Ctensor, self Ctensor, indices Ctensor, maxNorm float64, normType float64) { - cmaxNorm := *(*C.double)(unsafe.Pointer(&maxNorm)) - cnormType := *(*C.double)(unsafe.Pointer(&normType)) - C.atg_embedding_renorm_(ptr, self, indices, cmaxNorm, cnormType) -} -func AtgEmbeddingSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32) { - cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) - cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) - cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) - C.atg_embedding_sparse_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) -} -func AtgEmpty(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_empty(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgEmptyLike(ptr *Ctensor, self Ctensor) { - C.atg_empty_like(ptr, self) -} -func AtgEmptyOut(ptr 
*Ctensor, out Ctensor, sizeData []int64, sizeLen int) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_empty_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgEmptyStrided(ptr *Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_empty_strided(ptr, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, coptionsKind, coptionsDevice) -} -func AtgEq(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_eq(ptr, self, other) -} -func AtgEq1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_eq1(ptr, self, other) -} -func AtgEq_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_eq_(ptr, self, other) -} -func AtgEq1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_eq_1(ptr, self, other) -} -func AtgEqOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_eq_out(ptr, out, self, other) -} -func AtgEqOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_eq_out1(ptr, out, self, other) -} -func AtgErf(ptr *Ctensor, self Ctensor) { - C.atg_erf(ptr, self) -} -func AtgErf_(ptr *Ctensor, self Ctensor) { - C.atg_erf_(ptr, self) -} -func AtgErfOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_erf_out(ptr, out, self) -} -func AtgErfc(ptr *Ctensor, self Ctensor) { - C.atg_erfc(ptr, self) -} -func AtgErfc_(ptr *Ctensor, self Ctensor) { - C.atg_erfc_(ptr, self) -} -func AtgErfcOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_erfc_out(ptr, out, self) -} -func AtgErfinv(ptr *Ctensor, self Ctensor) { - C.atg_erfinv(ptr, self) -} -func AtgErfinv_(ptr *Ctensor, self Ctensor) { - C.atg_erfinv_(ptr, self) -} -func AtgErfinvOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_erfinv_out(ptr, out, self) -} -func AtgExp(ptr *Ctensor, self Ctensor) { - C.atg_exp(ptr, self) -} -func AtgExp_(ptr *Ctensor, self Ctensor) { - C.atg_exp_(ptr, self) -} -func AtgExpOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_exp_out(ptr, out, self) -} -func AtgExpand(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, implicit int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - cimplicit := *(*C.int)(unsafe.Pointer(&implicit)) - C.atg_expand(ptr, self, csizeDataPtr, csizeLen, cimplicit) -} -func AtgExpandAs(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_expand_as(ptr, self, other) -} -func AtgExpm1(ptr *Ctensor, self Ctensor) { - C.atg_expm1(ptr, self) -} -func AtgExpm1_(ptr *Ctensor, self Ctensor) { - C.atg_expm1_(ptr, self) -} -func AtgExpm1Out(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_expm1_out(ptr, out, self) -} -func AtgExponential_(ptr *Ctensor, self Ctensor, lambd float64) { - clambd := *(*C.double)(unsafe.Pointer(&lambd)) - C.atg_exponential_(ptr, self, clambd) -} -func AtgEye(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_eye(ptr, cn, coptionsKind, coptionsDevice) -} -func AtgEye1(ptr *Ctensor, n int64, m int64, 
optionsKind int32, optionsDevice int32) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - cm := *(*C.int64_t)(unsafe.Pointer(&m)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_eye1(ptr, cn, cm, coptionsKind, coptionsDevice) -} -func AtgEyeOut(ptr *Ctensor, out Ctensor, n int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - C.atg_eye_out(ptr, out, cn) -} -func AtgEyeOut1(ptr *Ctensor, out Ctensor, n int64, m int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - cm := *(*C.int64_t)(unsafe.Pointer(&m)) - C.atg_eye_out1(ptr, out, cn, cm) -} -func AtgFakeQuantizePerChannelAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64) { - caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) - cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) - cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) - C.atg_fake_quantize_per_channel_affine(ptr, self, scale, zeroPoint, caxis, cquantMin, cquantMax) -} -func AtgFakeQuantizePerChannelAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64) { - caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) - cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) - cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) - C.atg_fake_quantize_per_channel_affine_backward(ptr, grad, self, scale, zeroPoint, caxis, cquantMin, cquantMax) -} -func AtgFakeQuantizePerTensorAffine(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64) { - cscale := *(*C.double)(unsafe.Pointer(&scale)) - czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) - cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) - cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) - C.atg_fake_quantize_per_tensor_affine(ptr, self, cscale, czeroPoint, cquantMin, cquantMax) -} -func AtgFakeQuantizePerTensorAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64) { - cscale := *(*C.double)(unsafe.Pointer(&scale)) - czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) - cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) - cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) - C.atg_fake_quantize_per_tensor_affine_backward(ptr, grad, self, cscale, czeroPoint, cquantMin, cquantMax) -} -func AtgFbgemmLinearFp16Weight(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor) { - C.atg_fbgemm_linear_fp16_weight(ptr, input, packedWeight, bias) -} -func AtgFbgemmLinearFp16WeightFp32Activation(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor) { - C.atg_fbgemm_linear_fp16_weight_fp32_activation(ptr, input, packedWeight, bias) -} -func AtgFbgemmLinearInt8Weight(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor) { - C.atg_fbgemm_linear_int8_weight(ptr, input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) -} -func AtgFbgemmLinearInt8WeightFp32Activation(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor) { - C.atg_fbgemm_linear_int8_weight_fp32_activation(ptr, input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) -} -func AtgFbgemmPackGemmMatrixFp16(ptr *Ctensor, input Ctensor) { - C.atg_fbgemm_pack_gemm_matrix_fp16(ptr, input) -} -func AtgFbgemmPackQuantizedMatrix(ptr *Ctensor, input Ctensor) 
{ - C.atg_fbgemm_pack_quantized_matrix(ptr, input) -} -func AtgFbgemmPackQuantizedMatrix1(ptr *Ctensor, input Ctensor, k int64, n int64) { - ck := *(*C.int64_t)(unsafe.Pointer(&k)) - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - C.atg_fbgemm_pack_quantized_matrix1(ptr, input, ck, cn) -} -func AtgFeatureAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_feature_alpha_dropout(ptr, input, cp, ctrain) -} -func AtgFeatureAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_feature_alpha_dropout_(ptr, self, cp, ctrain) -} -func AtgFeatureDropout(ptr *Ctensor, input Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_feature_dropout(ptr, input, cp, ctrain) -} -func AtgFeatureDropout_(ptr *Ctensor, self Ctensor, p float64, train int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - C.atg_feature_dropout_(ptr, self, cp, ctrain) -} -func AtgFft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32) { - csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) - cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) - C.atg_fft(ptr, self, csignalNdim, cnormalized) -} -func AtgFill_(ptr *Ctensor, self Ctensor, value Cscalar) { - C.atg_fill_(ptr, self, value) -} -func AtgFill1_(ptr *Ctensor, self Ctensor, value Ctensor) { - C.atg_fill_1(ptr, self, value) -} -func AtgFillDiagonal_(ptr *Ctensor, self Ctensor, fillValue Cscalar, wrap int32) { - cwrap := *(*C.int)(unsafe.Pointer(&wrap)) - C.atg_fill_diagonal_(ptr, self, fillValue, cwrap) -} -func AtgFlatten(ptr *Ctensor, self Ctensor, startDim int64, endDim int64) { - cstartDim := *(*C.int64_t)(unsafe.Pointer(&startDim)) - cendDim := *(*C.int64_t)(unsafe.Pointer(&endDim)) - C.atg_flatten(ptr, self, cstartDim, cendDim) -} -func AtgFlip(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int) { - cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) - cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) - C.atg_flip(ptr, self, cdimsDataPtr, cdimsLen) -} -func AtgFloor(ptr *Ctensor, self Ctensor) { - C.atg_floor(ptr, self) -} -func AtgFloor_(ptr *Ctensor, self Ctensor) { - C.atg_floor_(ptr, self) -} -func AtgFloorDivide(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_floor_divide(ptr, self, other) -} -func AtgFloorDivide1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_floor_divide1(ptr, self, other) -} -func AtgFloorDivide_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_floor_divide_(ptr, self, other) -} -func AtgFloorDivide1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_floor_divide_1(ptr, self, other) -} -func AtgFloorDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_floor_divide_out(ptr, out, self, other) -} -func AtgFloorOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_floor_out(ptr, out, self) -} -func AtgFmod(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_fmod(ptr, self, other) -} -func AtgFmod1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_fmod1(ptr, self, other) -} -func AtgFmod_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_fmod_(ptr, self, other) -} -func AtgFmod1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_fmod_1(ptr, self, other) -} -func AtgFmodOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - 
C.atg_fmod_out(ptr, out, self, other) -} -func AtgFmodOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_fmod_out1(ptr, out, self, other) -} -func AtgFrac(ptr *Ctensor, self Ctensor) { - C.atg_frac(ptr, self) -} -func AtgFrac_(ptr *Ctensor, self Ctensor) { - C.atg_frac_(ptr, self) -} -func AtgFracOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_frac_out(ptr, out, self) -} -func AtgFractionalMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFractionalMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool2dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool2d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFractionalMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFractionalMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, 
outputSizeData []int64, outputSizeLen int, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) -} -func AtgFractionalMaxPool3dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_fractional_max_pool3d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) -} -func AtgFrobeniusNorm(ptr *Ctensor, self Ctensor) { - C.atg_frobenius_norm(ptr, self) -} -func AtgFrobeniusNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_frobenius_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgFrobeniusNormOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_frobenius_norm_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgFromFile(ptr *Ctensor, filename string, shared int32, size int64, optionsKind int32, optionsDevice int32) { - cfilename := C.CString(filename) - filenameLen := len(filename) - cfilenameLen := *(*C.int)(unsafe.Pointer(&filenameLen)) - cshared := *(*C.int)(unsafe.Pointer(&shared)) - csize := *(*C.int64_t)(unsafe.Pointer(&size)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_from_file(ptr, cfilename, cfilenameLen, cshared, csize, coptionsKind, coptionsDevice) -} -func AtgFull(ptr *Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_full(ptr, csizeDataPtr, csizeLen, fillValue, coptionsKind, coptionsDevice) -} -func AtgFullLike(ptr *Ctensor, self Ctensor, 
fillValue Cscalar) { - C.atg_full_like(ptr, self, fillValue) -} -func AtgFullOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_full_out(ptr, out, csizeDataPtr, csizeLen, fillValue) -} -func AtgGather(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) - C.atg_gather(ptr, self, cdim, index, csparseGrad) -} -func AtgGatherOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) - C.atg_gather_out(ptr, out, self, cdim, index, csparseGrad) -} -func AtgGe(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_ge(ptr, self, other) -} -func AtgGe1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_ge1(ptr, self, other) -} -func AtgGe_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_ge_(ptr, self, other) -} -func AtgGe1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_ge_1(ptr, self, other) -} -func AtgGeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_ge_out(ptr, out, self, other) -} -func AtgGeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_ge_out1(ptr, out, self, other) -} -func AtgGelu(ptr *Ctensor, self Ctensor) { - C.atg_gelu(ptr, self) -} -func AtgGeluBackward(ptr *Ctensor, grad Ctensor, self Ctensor) { - C.atg_gelu_backward(ptr, grad, self) -} -func AtgGeometric_(ptr *Ctensor, self Ctensor, p float64) { - cp := *(*C.double)(unsafe.Pointer(&p)) - C.atg_geometric_(ptr, self, cp) -} -func AtgGeqrf(ptr *Ctensor, self Ctensor) { - C.atg_geqrf(ptr, self) -} -func AtgGeqrfOut(ptr *Ctensor, a Ctensor, tau Ctensor, self Ctensor) { - C.atg_geqrf_out(ptr, a, tau, self) -} -func AtgGer(ptr *Ctensor, self Ctensor, vec2 Ctensor) { - C.atg_ger(ptr, self, vec2) -} -func AtgGerOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor) { - C.atg_ger_out(ptr, out, self, vec2) -} -func AtgGlu(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_glu(ptr, self, cdim) -} -func AtgGluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_glu_backward(ptr, gradOutput, self, cdim) -} -func AtgGluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_glu_backward_out(ptr, gradInput, gradOutput, self, cdim) -} -func AtgGluOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_glu_out(ptr, out, self, cdim) -} -func AtgGrad(ptr *Ctensor, self Ctensor) { - C.atg_grad(ptr, self) -} -func AtgGridSampler(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { - cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) - cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_grid_sampler(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) -} -func AtgGridSampler2d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { - cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) - cpaddingMode := 
*(*C.int64_t)(unsafe.Pointer(&paddingMode)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_grid_sampler_2d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) -} -func AtgGridSampler2dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { - cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) - cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_grid_sampler_2d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners) -} -func AtgGridSampler3d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { - cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) - cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_grid_sampler_3d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) -} -func AtgGridSampler3dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32) { - cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) - cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - C.atg_grid_sampler_3d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners) -} -func AtgGroupNorm(ptr *Ctensor, input Ctensor, numGroups int64, weight Ctensor, bias Ctensor, eps float64, cudnnEnabled int32) { - cnumGroups := *(*C.int64_t)(unsafe.Pointer(&numGroups)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) - C.atg_group_norm(ptr, input, cnumGroups, weight, bias, ceps, ccudnnEnabled) -} -func AtgGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - C.atg_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - C.atg_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) { - C.atg_gru_cell(ptr, input, hx,
wIh, wHh, bIh, bHh) -} -func AtgGt(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_gt(ptr, self, other) -} -func AtgGt1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_gt1(ptr, self, other) -} -func AtgGt_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_gt_(ptr, self, other) -} -func AtgGt1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_gt_1(ptr, self, other) -} -func AtgGtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_gt_out(ptr, out, self, other) -} -func AtgGtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_gt_out1(ptr, out, self, other) -} -func AtgHammingWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_hamming_window(ptr, cwindowLength, coptionsKind, coptionsDevice) -} -func AtgHammingWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_hamming_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) -} -func AtgHammingWindow2(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) - calpha := *(*C.double)(unsafe.Pointer(&alpha)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_hamming_window2(ptr, cwindowLength, cperiodic, calpha, coptionsKind, coptionsDevice) -} -func AtgHammingWindow3(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, beta float64, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) - calpha := *(*C.double)(unsafe.Pointer(&alpha)) - cbeta := *(*C.double)(unsafe.Pointer(&beta)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_hamming_window3(ptr, cwindowLength, cperiodic, calpha, cbeta, coptionsKind, coptionsDevice) -} -func AtgHannWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_hann_window(ptr, cwindowLength, coptionsKind, coptionsDevice) -} -func AtgHannWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32) { - cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) - cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_hann_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) -} -func AtgHardshrink(ptr *Ctensor, self Ctensor) { - C.atg_hardshrink(ptr, self) -} -func AtgHardshrinkBackward(ptr *Ctensor, gradOut Ctensor, self Ctensor, lambd Cscalar) { - C.atg_hardshrink_backward(ptr, gradOut, self, lambd) -} -func AtgHardsigmoid(ptr 
*Ctensor, self Ctensor) { - C.atg_hardsigmoid(ptr, self) -} -func AtgHardsigmoid_(ptr *Ctensor, self Ctensor) { - C.atg_hardsigmoid_(ptr, self) -} -func AtgHardsigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor) { - C.atg_hardsigmoid_backward(ptr, gradOutput, self) -} -func AtgHardsigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_hardsigmoid_out(ptr, out, self) -} -func AtgHardtanh(ptr *Ctensor, self Ctensor) { - C.atg_hardtanh(ptr, self) -} -func AtgHardtanh_(ptr *Ctensor, self Ctensor) { - C.atg_hardtanh_(ptr, self) -} -func AtgHardtanhBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar) { - C.atg_hardtanh_backward(ptr, gradOutput, self, minVal, maxVal) -} -func AtgHardtanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar) { - C.atg_hardtanh_backward_out(ptr, gradInput, gradOutput, self, minVal, maxVal) -} -func AtgHardtanhOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_hardtanh_out(ptr, out, self) -} -func AtgHingeEmbeddingLoss(ptr *Ctensor, self Ctensor, target Ctensor, margin float64, reduction int64) { - cmargin := *(*C.double)(unsafe.Pointer(&margin)) - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_hinge_embedding_loss(ptr, self, target, cmargin, creduction) -} -func AtgHistc(ptr *Ctensor, self Ctensor, bins int64) { - cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) - C.atg_histc(ptr, self, cbins) -} -func AtgHistcOut(ptr *Ctensor, out Ctensor, self Ctensor, bins int64) { - cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) - C.atg_histc_out(ptr, out, self, cbins) -} -func AtgHspmm(ptr *Ctensor, mat1 Ctensor, mat2 Ctensor) { - C.atg_hspmm(ptr, mat1, mat2) -} -func AtgHspmmOut(ptr *Ctensor, out Ctensor, mat1 Ctensor, mat2 Ctensor) { - C.atg_hspmm_out(ptr, out, mat1, mat2) -} -func AtgIfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32) { - csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) - cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) - C.atg_ifft(ptr, self, csignalNdim, cnormalized) -} -func AtgIm2col(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_im2col(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgIm2colBackward(ptr *Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - 
cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_im2col_backward(ptr, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgIm2colBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_im2col_backward_out(ptr, gradInput, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgIm2colOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - C.atg_im2col_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) -} -func AtgImag(ptr *Ctensor, self Ctensor) { - C.atg_imag(ptr, self) -} -func AtgIndex(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int) { - cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) - cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) - C.atg_index(ptr, self, cindicesDataPtr, cindicesLen) -} -func AtgIndexAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_add(ptr, self, cdim, index, source) -} -func AtgIndexAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_add_(ptr, self, cdim, index, source) -} -func AtgIndexCopy(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_copy(ptr, self, cdim, index, source) -} -func AtgIndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor) { - cdim := 
*(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_copy_(ptr, self, cdim, index, source) -} -func AtgIndexFill(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_fill(ptr, self, cdim, index, value) -} -func AtgIndexFill1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_fill1(ptr, self, cdim, index, value) -} -func AtgIndexFill_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_fill_(ptr, self, cdim, index, value) -} -func AtgIndexFill1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_fill_1(ptr, self, cdim, index, value) -} -func AtgIndexPut(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32) { - cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) - cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) - caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) - C.atg_index_put(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) -} -func AtgIndexPut_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32) { - cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) - cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) - caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) - C.atg_index_put_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) -} -func AtgIndexSelect(ptr *Ctensor, self Ctensor, dim int64, index Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_select(ptr, self, cdim, index) -} -func AtgIndexSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_index_select_out(ptr, out, self, cdim, index) -} -func AtgIndices(ptr *Ctensor, self Ctensor) { - C.atg_indices(ptr, self) -} -func AtgInstanceNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, useInputStats int32, momentum float64, eps float64, cudnnEnabled int32) { - cuseInputStats := *(*C.int)(unsafe.Pointer(&useInputStats)) - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) - C.atg_instance_norm(ptr, input, weight, bias, runningMean, runningVar, cuseInputStats, cmomentum, ceps, ccudnnEnabled) -} -func AtgIntRepr(ptr *Ctensor, self Ctensor) { - C.atg_int_repr(ptr, self) -} -func AtgInverse(ptr *Ctensor, self Ctensor) { - C.atg_inverse(ptr, self) -} -func AtgInverseOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_inverse_out(ptr, out, self) -} -func AtgIrfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32, signalSizesData []int64, signalSizesLen int) { - csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) - cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) - conesided := *(*C.int)(unsafe.Pointer(&onesided)) - csignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&signalSizesData[0])) - csignalSizesLen := *(*C.int)(unsafe.Pointer(&signalSizesLen)) - C.atg_irfft(ptr, self, csignalNdim, cnormalized, conesided, csignalSizesDataPtr, csignalSizesLen) -} -func AtgIsclose(ptr *Ctensor, self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32) { - crtol := 
*(*C.double)(unsafe.Pointer(&rtol)) - catol := *(*C.double)(unsafe.Pointer(&atol)) - cequalNan := *(*C.int)(unsafe.Pointer(&equalNan)) - C.atg_isclose(ptr, self, other, crtol, catol, cequalNan) -} -func AtgIsfinite(ptr *Ctensor, self Ctensor) { - C.atg_isfinite(ptr, self) -} -func AtgIsinf(ptr *Ctensor, self Ctensor) { - C.atg_isinf(ptr, self) -} -func AtgIsnan(ptr *Ctensor, self Ctensor) { - C.atg_isnan(ptr, self) -} -func AtgKlDiv(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_kl_div(ptr, self, target, creduction) -} -func AtgKlDivBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_kl_div_backward(ptr, gradOutput, self, target, creduction) -} -func AtgKthvalue(ptr *Ctensor, self Ctensor, k int64, dim int64, keepdim int32) { - ck := *(*C.int64_t)(unsafe.Pointer(&k)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_kthvalue(ptr, self, ck, cdim, ckeepdim) -} -func AtgKthvalueOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, keepdim int32) { - ck := *(*C.int64_t)(unsafe.Pointer(&k)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_kthvalue_out(ptr, values, indices, self, ck, cdim, ckeepdim) -} -func AtgL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_l1_loss(ptr, self, target, creduction) -} -func AtgL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_l1_loss_backward(ptr, gradOutput, self, target, creduction) -} -func AtgL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) -} -func AtgL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_l1_loss_out(ptr, out, self, target, creduction) -} -func AtgLayerNorm(ptr *Ctensor, input Ctensor, normalizedShapeData []int64, normalizedShapeLen int, weight Ctensor, bias Ctensor, eps float64, cudnnEnable int32) { - cnormalizedShapeDataPtr := (*C.int64_t)(unsafe.Pointer(&normalizedShapeData[0])) - cnormalizedShapeLen := *(*C.int)(unsafe.Pointer(&normalizedShapeLen)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable)) - C.atg_layer_norm(ptr, input, cnormalizedShapeDataPtr, cnormalizedShapeLen, weight, bias, ceps, ccudnnEnable) -} -func AtgLe(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_le(ptr, self, other) -} -func AtgLe1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_le1(ptr, self, other) -} -func AtgLe_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_le_(ptr, self, other) -} -func AtgLe1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_le_1(ptr, self, other) -} -func AtgLeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_le_out(ptr, out, self, other) -} -func AtgLeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_le_out1(ptr, out, self, other) -} -func AtgLeakyRelu(ptr *Ctensor, self Ctensor) { - C.atg_leaky_relu(ptr, 
self) -} -func AtgLeakyRelu_(ptr *Ctensor, self Ctensor) { - C.atg_leaky_relu_(ptr, self) -} -func AtgLeakyReluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, negativeSlope Cscalar, selfIsResult int32) { - cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) - C.atg_leaky_relu_backward(ptr, gradOutput, self, negativeSlope, cselfIsResult) -} -func AtgLeakyReluOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_leaky_relu_out(ptr, out, self) -} -func AtgLerp(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar) { - C.atg_lerp(ptr, self, end, weight) -} -func AtgLerp1(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor) { - C.atg_lerp1(ptr, self, end, weight) -} -func AtgLerp_(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar) { - C.atg_lerp_(ptr, self, end, weight) -} -func AtgLerp1_(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor) { - C.atg_lerp_1(ptr, self, end, weight) -} -func AtgLerpOut(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Cscalar) { - C.atg_lerp_out(ptr, out, self, end, weight) -} -func AtgLerpOut1(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Ctensor) { - C.atg_lerp_out1(ptr, out, self, end, weight) -} -func AtgLgamma(ptr *Ctensor, self Ctensor) { - C.atg_lgamma(ptr, self) -} -func AtgLgamma_(ptr *Ctensor, self Ctensor) { - C.atg_lgamma_(ptr, self) -} -func AtgLgammaOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_lgamma_out(ptr, out, self) -} -func AtgLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor) { - C.atg_linear(ptr, input, weight, bias) -} -func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, optionsKind int32, optionsDevice int32) { - csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_linspace(ptr, start, end, csteps, coptionsKind, coptionsDevice) -} -func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64) { - csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) - C.atg_linspace_out(ptr, out, start, end, csteps) -} -func AtgLog(ptr *Ctensor, self Ctensor) { - C.atg_log(ptr, self) -} -func AtgLog10(ptr *Ctensor, self Ctensor) { - C.atg_log10(ptr, self) -} -func AtgLog10_(ptr *Ctensor, self Ctensor) { - C.atg_log10_(ptr, self) -} -func AtgLog10Out(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_log10_out(ptr, out, self) -} -func AtgLog1p(ptr *Ctensor, self Ctensor) { - C.atg_log1p(ptr, self) -} -func AtgLog1p_(ptr *Ctensor, self Ctensor) { - C.atg_log1p_(ptr, self) -} -func AtgLog1pOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_log1p_out(ptr, out, self) -} -func AtgLog2(ptr *Ctensor, self Ctensor) { - C.atg_log2(ptr, self) -} -func AtgLog2_(ptr *Ctensor, self Ctensor) { - C.atg_log2_(ptr, self) -} -func AtgLog2Out(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_log2_out(ptr, out, self) -} -func AtgLog_(ptr *Ctensor, self Ctensor) { - C.atg_log_(ptr, self) -} -func AtgLogNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64) { - cmean := *(*C.double)(unsafe.Pointer(&mean)) - cstd := *(*C.double)(unsafe.Pointer(&std)) - C.atg_log_normal_(ptr, self, cmean, cstd) -} -func AtgLogOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_log_out(ptr, out, self) -} -func AtgLogSigmoid(ptr *Ctensor, self Ctensor) { - C.atg_log_sigmoid(ptr, self) -} -func AtgLogSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor) { - C.atg_log_sigmoid_backward(ptr, gradOutput, self, buffer) -} 
-func AtgLogSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor) { - C.atg_log_sigmoid_backward_out(ptr, gradInput, gradOutput, self, buffer) -} -func AtgLogSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_log_sigmoid_out(ptr, out, self) -} -func AtgLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_log_softmax(ptr, self, cdim, cdtype) -} -func AtgLogdet(ptr *Ctensor, self Ctensor) { - C.atg_logdet(ptr, self) -} -func AtgLogicalAnd(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_and(ptr, self, other) -} -func AtgLogicalAnd_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_and_(ptr, self, other) -} -func AtgLogicalAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_and_out(ptr, out, self, other) -} -func AtgLogicalNot(ptr *Ctensor, self Ctensor) { - C.atg_logical_not(ptr, self) -} -func AtgLogicalNot_(ptr *Ctensor, self Ctensor) { - C.atg_logical_not_(ptr, self) -} -func AtgLogicalNotOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_logical_not_out(ptr, out, self) -} -func AtgLogicalOr(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_or(ptr, self, other) -} -func AtgLogicalOr_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_or_(ptr, self, other) -} -func AtgLogicalOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_or_out(ptr, out, self, other) -} -func AtgLogicalXor(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_xor(ptr, self, other) -} -func AtgLogicalXor_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_xor_(ptr, self, other) -} -func AtgLogicalXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_logical_xor_out(ptr, out, self, other) -} -func AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, steps int64, base float64, optionsKind int32, optionsDevice int32) { - csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) - cbase := *(*C.double)(unsafe.Pointer(&base)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_logspace(ptr, start, end, csteps, cbase, coptionsKind, coptionsDevice) -} -func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, steps int64, base float64) { - csteps := *(*C.int64_t)(unsafe.Pointer(&steps)) - cbase := *(*C.double)(unsafe.Pointer(&base)) - C.atg_logspace_out(ptr, out, start, end, csteps, cbase) -} -func AtgLogsumexp(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_logsumexp(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgLogsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_logsumexp_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { - chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) - chxLen := 
*(*C.int)(unsafe.Pointer(&hxLen)) - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - C.atg_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { - chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) - chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - C.atg_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) { - chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) - chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) - C.atg_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh) -} -func AtgLstsq(ptr *Ctensor, self Ctensor, a Ctensor) { - C.atg_lstsq(ptr, self, a) -} -func AtgLstsqOut(ptr *Ctensor, x Ctensor, qr Ctensor, self Ctensor, a Ctensor) { - C.atg_lstsq_out(ptr, x, qr, self, a) -} -func AtgLt(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_lt(ptr, self, other) -} -func AtgLt1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_lt1(ptr, self, other) -} -func AtgLt_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_lt_(ptr, self, other) -} -func AtgLt1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_lt_1(ptr, self, other) -} -func AtgLtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_lt_out(ptr, out, self, other) -} -func AtgLtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_lt_out1(ptr, out, self, other) -} -func AtgLuSolve(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor) { - C.atg_lu_solve(ptr, self, lUData, lUPivots) -} -func AtgLuSolveOut(ptr *Ctensor, out Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor) { - C.atg_lu_solve_out(ptr, out, self, lUData, lUPivots) -} -func AtgMarginRankingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64) { - cmargin := *(*C.double)(unsafe.Pointer(&margin)) - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_margin_ranking_loss(ptr, input1, input2, target, cmargin, creduction) -} -func AtgMaskedFill(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar) { - C.atg_masked_fill(ptr, self, mask, value) -} -func AtgMaskedFill1(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor) { - C.atg_masked_fill1(ptr, self, mask, value) -} -func AtgMaskedFill_(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar) { - C.atg_masked_fill_(ptr, self, mask, value) -}
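The replacement wrappers added further down in this hunk (for example `AtgAvgPool2d`) follow the new nullable-scalar convention from `gen.ml`: an optional `int64`/`double` argument is flattened into a value/flag pair (`...Val`, `...Null`), where a non-zero null flag is expected to mean "no value supplied" on the C side. A rough caller-side sketch, assuming a hypothetical `divisorOverride *int64` option and with the remaining arguments taken as already in scope:

```go
// Hypothetical handling of an optional int64 argument for AtgAvgPool2d.
// divisorOverride (*int64) is illustrative only, not part of this diff.
divisorOverrideVal := int64(0) // ignored by the C side when the null flag is set
divisorOverrideNull := 1       // non-zero: treat the option as unset (nullopt)
if divisorOverride != nil {
	divisorOverrideVal = *divisorOverride
	divisorOverrideNull = 0
}
AtgAvgPool2d(ptr, self, kernelSize, len(kernelSize), stride, len(stride),
	padding, len(padding), ceilMode, countIncludePad,
	divisorOverrideVal, divisorOverrideNull)
```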
-func AtgMaskedFill1_(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor) { - C.atg_masked_fill_1(ptr, self, mask, value) -} -func AtgMaskedScatter(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor) { - C.atg_masked_scatter(ptr, self, mask, source) -} -func AtgMaskedScatter_(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor) { - C.atg_masked_scatter_(ptr, self, mask, source) -} -func AtgMaskedSelect(ptr *Ctensor, self Ctensor, mask Ctensor) { - C.atg_masked_select(ptr, self, mask) -} -func AtgMaskedSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, mask Ctensor) { - C.atg_masked_select_out(ptr, out, self, mask) -} -func AtgMatmul(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_matmul(ptr, self, other) -} -func AtgMatmulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_matmul_out(ptr, out, self, other) -} -func AtgMatrixPower(ptr *Ctensor, self Ctensor, n int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - C.atg_matrix_power(ptr, self, cn) -} -func AtgMatrixRank(ptr *Ctensor, self Ctensor, symmetric int32) { - csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) - C.atg_matrix_rank(ptr, self, csymmetric) -} -func AtgMatrixRank1(ptr *Ctensor, self Ctensor, tol float64, symmetric int32) { - ctol := *(*C.double)(unsafe.Pointer(&tol)) - csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) - C.atg_matrix_rank1(ptr, self, ctol, csymmetric) -} -func AtgMax(ptr *Ctensor, self Ctensor) { - C.atg_max(ptr, self) -} -func AtgMax1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_max1(ptr, self, other) -} -func AtgMax2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_max2(ptr, self, cdim, ckeepdim) -} -func AtgMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_max_out(ptr, out, self, other) -} -func AtgMaxOut1(ptr *Ctensor, max Ctensor, maxValues Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_max_out1(ptr, max, maxValues, self, cdim, ckeepdim) -} -func AtgMaxPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool1dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := 
*(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool1d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool2dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool2d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool2dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool2d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool2dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, 
gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool2d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool2dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool2d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool3dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := 
*(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool3d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMaxPool3dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool3d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool3dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool3d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) -} -func AtgMaxPool3dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_max_pool3d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func 
AtgMaxUnpool2d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_max_unpool2d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_max_unpool2d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_max_unpool2d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_max_unpool2d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMaxUnpool3d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_max_unpool3d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxUnpool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_max_unpool3d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxUnpool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_max_unpool3d_backward_out(ptr, 
gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxUnpool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_max_unpool3d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgMaxValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_max_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgMean(ptr *Ctensor, self Ctensor, dtype int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_mean(ptr, self, cdtype) -} -func AtgMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_mean1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgMeanOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_mean_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgMedian(ptr *Ctensor, self Ctensor) { - C.atg_median(ptr, self) -} -func AtgMedian1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_median1(ptr, self, cdim, ckeepdim) -} -func AtgMedianOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_median_out(ptr, values, indices, self, cdim, ckeepdim) +func AtgAtleast3d(ptr *Ctensor, self Ctensor){ +C.atg_atleast_3d(ptr, self) } -func AtgMin(ptr *Ctensor, self Ctensor) { - C.atg_min(ptr, self) -} -func AtgMin1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_min1(ptr, self, other) -} -func AtgMin2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_min2(ptr, self, cdim, ckeepdim) -} -func AtgMinOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_min_out(ptr, out, self, other) -} -func AtgMinOut1(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_min_out1(ptr, min, minIndices, self, cdim, ckeepdim) -} -func AtgMinValues(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim 
int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_min_values(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgMiopenBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) - cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) - C.atg_miopen_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) -} -func AtgMiopenBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64) { - cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) - C.atg_miopen_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon) -} -func AtgMiopenConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionBackwardBias(ptr *Ctensor, gradOutput Ctensor) { - C.atg_miopen_convolution_backward_bias(ptr, gradOutput) -} -func AtgMiopenConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) - cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData 
[]int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) - cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_transpose(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups 
int64, benchmark int32, deterministic int32) { - cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) - cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenDepthwiseConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_depthwise_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenDepthwiseConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) - cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_depthwise_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenDepthwiseConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32) { - cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) - cweightSizeLen := 
*(*C.int)(unsafe.Pointer(&weightSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) - cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) - C.atg_miopen_depthwise_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) -} -func AtgMiopenRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor) { - cweightDataPtr := (*Ctensor)(unsafe.Pointer(&weightData[0])) - cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) - cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) - cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) - chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) - cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) - C.atg_miopen_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) -} -func AtgMkldnnAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - C.atg_mkldnn_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) -} -func AtgMkldnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - C.atg_mkldnn_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgMkldnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32) { - cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) - cselfSizeLen := 
*(*C.int)(unsafe.Pointer(&selfSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) - C.atg_mkldnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) -} -func AtgMkldnnConvolutionBackwardWeights(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32) { - cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) - cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) - C.atg_mkldnn_convolution_backward_weights(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) -} -func AtgMkldnnLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor) { - C.atg_mkldnn_linear(ptr, input, weight, bias) -} -func AtgMkldnnMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_mkldnn_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgMkldnnReorderConv2dWeight(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) - 
C.atg_mkldnn_reorder_conv2d_weight(ptr, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) -} -func AtgMm(ptr *Ctensor, self Ctensor, mat2 Ctensor) { - C.atg_mm(ptr, self, mat2) -} -func AtgMmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor) { - C.atg_mm_out(ptr, out, self, mat2) -} -func AtgMode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_mode(ptr, self, cdim, ckeepdim) -} -func AtgModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_mode_out(ptr, values, indices, self, cdim, ckeepdim) -} -func AtgMseLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_mse_loss(ptr, self, target, creduction) -} -func AtgMseLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_mse_loss_backward(ptr, gradOutput, self, target, creduction) -} -func AtgMseLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_mse_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) -} -func AtgMseLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_mse_loss_out(ptr, out, self, target, creduction) -} -func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_mul(ptr, self, other) -} -func AtgMul1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_mul1(ptr, self, other) -} -func AtgMul_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_mul_(ptr, self, other) -} -func AtgMul1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_mul_1(ptr, self, other) -} -func AtgMulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_mul_out(ptr, out, self, other) -} -func AtgMultiMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_multi_margin_loss_backward(ptr, gradOutput, self, target, p, margin, weight, creduction) -} -func AtgMultiMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_multi_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, p, margin, weight, creduction) -} -func AtgMultilabelMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_multilabel_margin_loss(ptr, self, target, creduction) -} -func AtgMultilabelMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_multilabel_margin_loss_backward(ptr, gradOutput, self, target, creduction, isTarget) -} -func AtgMultilabelMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, 
isTarget Ctensor) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_multilabel_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction, isTarget) -} -func AtgMultilabelMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_multilabel_margin_loss_out(ptr, out, self, target, creduction) -} -func AtgMultinomial(ptr *Ctensor, self Ctensor, numSamples int64, replacement int32) { - cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) - creplacement := *(*C.int)(unsafe.Pointer(&replacement)) - C.atg_multinomial(ptr, self, cnumSamples, creplacement) -} -func AtgMultinomialOut(ptr *Ctensor, out Ctensor, self Ctensor, numSamples int64, replacement int32) { - cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) - creplacement := *(*C.int)(unsafe.Pointer(&replacement)) - C.atg_multinomial_out(ptr, out, self, cnumSamples, creplacement) -} -func AtgMv(ptr *Ctensor, self Ctensor, vec Ctensor) { - C.atg_mv(ptr, self, vec) -} -func AtgMvOut(ptr *Ctensor, out Ctensor, self Ctensor, vec Ctensor) { - C.atg_mv_out(ptr, out, self, vec) -} -func AtgMvlgamma(ptr *Ctensor, self Ctensor, p int64) { - cp := *(*C.int64_t)(unsafe.Pointer(&p)) - C.atg_mvlgamma(ptr, self, cp) -} -func AtgMvlgamma_(ptr *Ctensor, self Ctensor, p int64) { - cp := *(*C.int64_t)(unsafe.Pointer(&p)) - C.atg_mvlgamma_(ptr, self, cp) -} -func AtgNarrow(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cstart := *(*C.int64_t)(unsafe.Pointer(&start)) - clength := *(*C.int64_t)(unsafe.Pointer(&length)) - C.atg_narrow(ptr, self, cdim, cstart, clength) -} -func AtgNarrow1(ptr *Ctensor, self Ctensor, dim int64, start Ctensor, length int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - clength := *(*C.int64_t)(unsafe.Pointer(&length)) - C.atg_narrow1(ptr, self, cdim, start, clength) -} -func AtgNarrowCopy(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cstart := *(*C.int64_t)(unsafe.Pointer(&start)) - clength := *(*C.int64_t)(unsafe.Pointer(&length)) - C.atg_narrow_copy(ptr, self, cdim, cstart, clength) -} -func AtgNativeBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - C.atg_native_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) -} -func AtgNativeBatchNormOut(ptr *Ctensor, out Ctensor, saveMean Ctensor, saveInvstd Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - C.atg_native_batch_norm_out(ptr, out, saveMean, saveInvstd, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) -} -func AtgNativeLayerNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, m int64, n int64, eps float64) { - cm := *(*C.int64_t)(unsafe.Pointer(&m)) - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - C.atg_native_layer_norm(ptr, input, weight, bias, cm, cn, ceps) -} -func 
AtgNativeNorm(ptr *Ctensor, self Ctensor) { - C.atg_native_norm(ptr, self) -} -func AtgNe(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_ne(ptr, self, other) -} -func AtgNe1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_ne1(ptr, self, other) -} -func AtgNe_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_ne_(ptr, self, other) -} -func AtgNe1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_ne_1(ptr, self, other) -} -func AtgNeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_ne_out(ptr, out, self, other) -} -func AtgNeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_ne_out1(ptr, out, self, other) -} -func AtgNeg(ptr *Ctensor, self Ctensor) { - C.atg_neg(ptr, self) -} -func AtgNeg_(ptr *Ctensor, self Ctensor) { - C.atg_neg_(ptr, self) -} -func AtgNegOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_neg_out(ptr, out, self) -} -func AtgNewEmpty(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_new_empty(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgNewFull(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_new_full(ptr, self, csizeDataPtr, csizeLen, fillValue, coptionsKind, coptionsDevice) -} -func AtgNewZeros(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_new_zeros(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgNllLoss(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss(ptr, self, target, weight, creduction, cignoreIndex) -} -func AtgNllLoss2d(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss2d(ptr, self, target, weight, creduction, cignoreIndex) -} -func AtgNllLoss2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss2d_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLoss2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := 
*(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss2d_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLoss2dOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss2d_out(ptr, out, self, target, weight, creduction, cignoreIndex) -} -func AtgNllLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) -} -func AtgNllLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) - C.atg_nll_loss_out(ptr, out, self, target, weight, creduction, cignoreIndex) -} -func AtgNonzero(ptr *Ctensor, self Ctensor) { - C.atg_nonzero(ptr, self) +func AtgAvgPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +C.atg_avg_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad) +} +func AtgAvgPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, 
cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func AtgAvgPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func AtgAvgPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func AtgAvgPool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool2d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func 
AtgAvgPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func AtgAvgPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func AtgAvgPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func AtgAvgPool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData 
[]int64, paddingLen int, ceilMode int32, countIncludePad int32, divisorOverrideVal int64, divisorOverrideNull int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +ccountIncludePad := *(*C.int)(unsafe.Pointer(&countIncludePad)) +cdivisorOverrideVal := *(*C.int64_t)(unsafe.Pointer(&divisorOverrideVal)) +cdivisorOverrideNull := *(*C.uint8_t)(unsafe.Pointer(&divisorOverrideNull)) +C.atg_avg_pool3d_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) +} +func AtgBaddbmm(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_baddbmm(ptr, self, batch1, batch2) +} +func AtgBaddbmm_(ptr *Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_baddbmm_(ptr, self, batch1, batch2) +} +func AtgBaddbmmOut(ptr *Ctensor, out Ctensor, self Ctensor, batch1 Ctensor, batch2 Ctensor){ +C.atg_baddbmm_out(ptr, out, self, batch1, batch2) +} +func AtgBartlettWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_bartlett_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgBartlettWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_bartlett_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64, cudnnEnabled int32){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) +C.atg_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps, ccudnnEnabled) +} +func AtgBatchNormBackwardElemt(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, meanDy Ctensor, meanDyXmu Ctensor){ +C.atg_batch_norm_backward_elemt(ptr, gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) +} +func AtgBatchNormBackwardReduce(ptr *Ctensor, gradOut Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, weight Ctensor, inputG int32, weightG int32, biasG int32){ +cinputG := *(*C.int)(unsafe.Pointer(&inputG)) +cweightG := *(*C.int)(unsafe.Pointer(&weightG)) +cbiasG := *(*C.int)(unsafe.Pointer(&biasG)) +C.atg_batch_norm_backward_reduce(ptr, gradOut, input, mean, invstd, weight, cinputG, cweightG, cbiasG) +} +func AtgBatchNormElemt(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_batch_norm_elemt(ptr, input, weight, bias, 
mean, invstd, ceps) +} +func AtgBatchNormElemtOut(ptr *Ctensor, out Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, invstd Ctensor, eps float64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_batch_norm_elemt_out(ptr, out, input, weight, bias, mean, invstd, ceps) +} +func AtgBatchNormGatherStats(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, count int64){ +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccount := *(*C.int64_t)(unsafe.Pointer(&count)) +C.atg_batch_norm_gather_stats(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, ccount) +} +func AtgBatchNormGatherStatsWithCounts(ptr *Ctensor, input Ctensor, mean Ctensor, invstd Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64, eps float64, counts Ctensor){ +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_batch_norm_gather_stats_with_counts(ptr, input, mean, invstd, runningMean, runningVar, cmomentum, ceps, counts) +} +func AtgBatchNormStats(ptr *Ctensor, input Ctensor, eps float64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_batch_norm_stats(ptr, input, ceps) +} +func AtgBatchNormUpdateStats(ptr *Ctensor, input Ctensor, runningMean Ctensor, runningVar Ctensor, momentum float64){ +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +C.atg_batch_norm_update_stats(ptr, input, runningMean, runningVar, cmomentum) +} +func AtgBernoulli(ptr *Ctensor, self Ctensor){ +C.atg_bernoulli(ptr, self) +} +func AtgBernoulli1(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg_bernoulli1(ptr, self, cp) +} +func AtgBernoulli_(ptr *Ctensor, self Ctensor, p Ctensor){ +C.atg_bernoulli_(ptr, self, p) +} +func AtgBernoulli1_(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg_bernoulli_1(ptr, self, cp) +} +func AtgBernoulliOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_bernoulli_out(ptr, out, self) +} +func AtgBilinear(ptr *Ctensor, input1 Ctensor, input2 Ctensor, weight Ctensor, bias Ctensor){ +C.atg_bilinear(ptr, input1, input2, weight, bias) +} +func AtgBinaryCrossEntropy(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy(ptr, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_backward(ptr, gradOutput, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_out(ptr, out, self, target, weight, creduction) +} +func AtgBinaryCrossEntropyWithLogits(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) 
+C.atg_binary_cross_entropy_with_logits(ptr, self, target, weight, posWeight, creduction) +} +func AtgBinaryCrossEntropyWithLogitsBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, posWeight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_binary_cross_entropy_with_logits_backward(ptr, gradOutput, self, target, weight, posWeight, creduction) +} +func AtgBincount(ptr *Ctensor, self Ctensor, weights Ctensor, minlength int64){ +cminlength := *(*C.int64_t)(unsafe.Pointer(&minlength)) +C.atg_bincount(ptr, self, weights, cminlength) +} +func AtgBinomial(ptr *Ctensor, count Ctensor, prob Ctensor){ +C.atg_binomial(ptr, count, prob) +} +func AtgBitwiseAnd(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_and(ptr, self, other ) +} +func AtgBitwiseAnd1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_and1(ptr, self, other) +} +func AtgBitwiseAnd_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_and_(ptr, self, other ) +} +func AtgBitwiseAnd1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_and_1(ptr, self, other) +} +func AtgBitwiseAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_and_out(ptr, out, self, other) +} +func AtgBitwiseAndOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_and_out1(ptr, out, self, other ) +} +func AtgBitwiseNot(ptr *Ctensor, self Ctensor){ +C.atg_bitwise_not(ptr, self) +} +func AtgBitwiseNot_(ptr *Ctensor, self Ctensor){ +C.atg_bitwise_not_(ptr, self) +} +func AtgBitwiseNotOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_bitwise_not_out(ptr, out, self) +} +func AtgBitwiseOr(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_or(ptr, self, other ) +} +func AtgBitwiseOr1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_or1(ptr, self, other) +} +func AtgBitwiseOr_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_or_(ptr, self, other ) +} +func AtgBitwiseOr1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_or_1(ptr, self, other) +} +func AtgBitwiseOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_or_out(ptr, out, self, other) +} +func AtgBitwiseOrOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_or_out1(ptr, out, self, other ) +} +func AtgBitwiseXor(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_xor(ptr, self, other ) +} +func AtgBitwiseXor1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_xor1(ptr, self, other) +} +func AtgBitwiseXor_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_xor_(ptr, self, other ) +} +func AtgBitwiseXor1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_xor_1(ptr, self, other) +} +func AtgBitwiseXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_bitwise_xor_out(ptr, out, self, other) +} +func AtgBitwiseXorOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_bitwise_xor_out1(ptr, out, self, other ) +} +func AtgBlackmanWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_blackman_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgBlackmanWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ +cwindowLength := 
*(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_blackman_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgBlockDiag(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_block_diag(ptr, ctensorsDataPtr, ctensorsLen) +} +func AtgBmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_bmm(ptr, self, mat2) +} +func AtgBmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_bmm_out(ptr, out, self, mat2) } -func AtgNonzeroOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_nonzero_out(ptr, out, self) -} -func AtgNorm(ptr *Ctensor, self Ctensor) { - C.atg_norm(ptr, self) -} -func AtgNorm1(ptr *Ctensor, self Ctensor, p Cscalar, dtype int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_norm1(ptr, self, p, cdtype) -} -func AtgNorm2(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_norm2(ptr, self, p, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNorm3(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_norm3(ptr, self, p, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgNormExceptDim(ptr *Ctensor, v Ctensor, pow int64, dim int64) { - cpow := *(*C.int64_t)(unsafe.Pointer(&pow)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_norm_except_dim(ptr, v, cpow, cdim) -} -func AtgNormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_norm_out(ptr, out, self, p, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_norm_out1(ptr, out, self, p, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64) { - cmean := *(*C.double)(unsafe.Pointer(&mean)) - cstd := *(*C.double)(unsafe.Pointer(&std)) - C.atg_normal_(ptr, self, cmean, cstd) -} -func AtgNormalOut(ptr *Ctensor, out Ctensor, mean Ctensor, std float64) { - cstd := *(*C.double)(unsafe.Pointer(&std)) - C.atg_normal_out(ptr, out, mean, cstd) -} -func AtgNormalOut1(ptr *Ctensor, out Ctensor, mean float64, std Ctensor) { - cmean := *(*C.double)(unsafe.Pointer(&mean)) - C.atg_normal_out1(ptr, out, cmean, std) -} -func AtgNormalOut2(ptr *Ctensor, out Ctensor, mean Ctensor, std Ctensor) { - C.atg_normal_out2(ptr, out, mean, std) -} -func AtgNormalOut3(ptr *Ctensor, out Ctensor, mean float64, std float64, sizeData []int64, sizeLen int) { - cmean := 
*(*C.double)(unsafe.Pointer(&mean)) - cstd := *(*C.double)(unsafe.Pointer(&std)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_normal_out3(ptr, out, cmean, cstd, csizeDataPtr, csizeLen) -} -func AtgNuclearNorm(ptr *Ctensor, self Ctensor, keepdim int32) { - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nuclear_norm(ptr, self, ckeepdim) -} -func AtgNuclearNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nuclear_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNuclearNormOut(ptr *Ctensor, out Ctensor, self Ctensor, keepdim int32) { - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nuclear_norm_out(ptr, out, self, ckeepdim) -} -func AtgNuclearNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_nuclear_norm_out1(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) -} -func AtgNumpyT(ptr *Ctensor, self Ctensor) { - C.atg_numpy_t(ptr, self) -} -func AtgOneHot(ptr *Ctensor, self Ctensor, numClasses int64) { - cnumClasses := *(*C.int64_t)(unsafe.Pointer(&numClasses)) - C.atg_one_hot(ptr, self, cnumClasses) -} -func AtgOnes(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_ones(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgOnesLike(ptr *Ctensor, self Ctensor) { - C.atg_ones_like(ptr, self) -} -func AtgOnesOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_ones_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgOrgqr(ptr *Ctensor, self Ctensor, input2 Ctensor) { - C.atg_orgqr(ptr, self, input2) -} -func AtgOrgqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor) { - C.atg_orgqr_out(ptr, out, self, input2) -} -func AtgOrmqr(ptr *Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32) { - cleft := *(*C.int)(unsafe.Pointer(&left)) - ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) - C.atg_ormqr(ptr, self, input2, input3, cleft, ctranspose) -} -func AtgOrmqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32) { - cleft := *(*C.int)(unsafe.Pointer(&left)) - ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) - C.atg_ormqr_out(ptr, out, self, input2, input3, cleft, ctranspose) -} -func AtgPairwiseDistance(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, eps float64, keepdim int32) { - cp := *(*C.double)(unsafe.Pointer(&p)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_pairwise_distance(ptr, x1, x2, cp, ceps, ckeepdim) -} -func AtgPdist(ptr *Ctensor, self Ctensor, p float64) { - cp := *(*C.double)(unsafe.Pointer(&p)) - C.atg_pdist(ptr, self, cp) -} -func AtgPermute(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int) { - cdimsDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dimsData[0])) - cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) - C.atg_permute(ptr, self, cdimsDataPtr, cdimsLen) -} -func AtgPinMemory(ptr *Ctensor, self Ctensor) { - C.atg_pin_memory(ptr, self) -} -func AtgPinverse(ptr *Ctensor, self Ctensor, rcond float64) { - crcond := *(*C.double)(unsafe.Pointer(&rcond)) - C.atg_pinverse(ptr, self, crcond) -} -func AtgPixelShuffle(ptr *Ctensor, self Ctensor, upscaleFactor int64) { - cupscaleFactor := *(*C.int64_t)(unsafe.Pointer(&upscaleFactor)) - C.atg_pixel_shuffle(ptr, self, cupscaleFactor) -} -func AtgPoisson(ptr *Ctensor, self Ctensor) { - C.atg_poisson(ptr, self) -} -func AtgPoissonNllLoss(ptr *Ctensor, input Ctensor, target Ctensor, logInput int32, full int32, eps float64, reduction int64) { - clogInput := *(*C.int)(unsafe.Pointer(&logInput)) - cfull := *(*C.int)(unsafe.Pointer(&full)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_poisson_nll_loss(ptr, input, target, clogInput, cfull, ceps, creduction) -} -func AtgPolygamma(ptr *Ctensor, n int64, self Ctensor) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - C.atg_polygamma(ptr, cn, self) -} -func AtgPolygamma_(ptr *Ctensor, self Ctensor, n int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - C.atg_polygamma_(ptr, self, cn) -} -func AtgPolygammaOut(ptr *Ctensor, out Ctensor, n int64, self Ctensor) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - C.atg_polygamma_out(ptr, out, cn, self) -} -func AtgPow(ptr *Ctensor, self Ctensor, exponent Cscalar) { - C.atg_pow(ptr, self, exponent) -} -func AtgPow1(ptr *Ctensor, self Ctensor, exponent Ctensor) { - C.atg_pow1(ptr, self, exponent) -} -func AtgPow2(ptr *Ctensor, selfScalar Cscalar, exponent Ctensor) { - C.atg_pow2(ptr, selfScalar, exponent) -} -func AtgPow_(ptr *Ctensor, self Ctensor, exponent Cscalar) { - C.atg_pow_(ptr, self, exponent) -} -func AtgPow1_(ptr *Ctensor, self Ctensor, exponent Ctensor) { - C.atg_pow_1(ptr, self, exponent) -} -func AtgPowOut(ptr *Ctensor, out Ctensor, self Ctensor, exponent Cscalar) { - C.atg_pow_out(ptr, out, self, exponent) -} -func AtgPowOut1(ptr *Ctensor, out Ctensor, self Ctensor, exponent Ctensor) { - C.atg_pow_out1(ptr, out, self, exponent) -} -func AtgPowOut2(ptr *Ctensor, out Ctensor, selfScalar Cscalar, exponent Ctensor) { - C.atg_pow_out2(ptr, out, selfScalar, exponent) -} -func AtgPrelu(ptr *Ctensor, self Ctensor, weight Ctensor) { - C.atg_prelu(ptr, self, weight) -} -func AtgPreluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor) { - C.atg_prelu_backward(ptr, gradOutput, self, weight) -} -func AtgProd(ptr *Ctensor, self Ctensor, dtype int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_prod(ptr, self, cdtype) -} -func AtgProd1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_prod1(ptr, self, cdim, ckeepdim, cdtype) -} -func AtgProdOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_prod_out(ptr, out, self, cdim, ckeepdim, cdtype) -} -func AtgPut_(ptr *Ctensor, self Ctensor, index Ctensor, source Ctensor, accumulate int32) { - caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) - C.atg_put_(ptr, self, index, source, caccumulate) -} -func 
AtgQPerChannelScales(ptr *Ctensor, self Ctensor) { - C.atg_q_per_channel_scales(ptr, self) -} -func AtgQPerChannelZeroPoints(ptr *Ctensor, self Ctensor) { - C.atg_q_per_channel_zero_points(ptr, self) -} -func AtgQr(ptr *Ctensor, self Ctensor, some int32) { - csome := *(*C.int)(unsafe.Pointer(&some)) - C.atg_qr(ptr, self, csome) -} -func AtgQrOut(ptr *Ctensor, q Ctensor, r Ctensor, self Ctensor, some int32) { - csome := *(*C.int)(unsafe.Pointer(&some)) - C.atg_qr_out(ptr, q, r, self, csome) -} -func AtgQuantizePerChannel(ptr *Ctensor, self Ctensor, scales Ctensor, zeroPoints Ctensor, axis int64, dtype int32) { - caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_quantize_per_channel(ptr, self, scales, zeroPoints, caxis, cdtype) -} -func AtgQuantizePerTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, dtype int32) { - cscale := *(*C.double)(unsafe.Pointer(&scale)) - czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_quantize_per_tensor(ptr, self, cscale, czeroPoint, cdtype) -} -func AtgQuantizedBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, vari Ctensor, eps float64, outputScale float64, outputZeroPoint int64) { - ceps := *(*C.double)(unsafe.Pointer(&eps)) - coutputScale := *(*C.double)(unsafe.Pointer(&outputScale)) - coutputZeroPoint := *(*C.int64_t)(unsafe.Pointer(&outputZeroPoint)) - C.atg_quantized_batch_norm(ptr, input, weight, bias, mean, vari, ceps, coutputScale, coutputZeroPoint) -} -func AtgQuantizedGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - C.atg_quantized_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgQuantizedGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - C.atg_quantized_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgQuantizedGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { - C.atg_quantized_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) -} -func AtgQuantizedLstm(ptr *Ctensor, 
input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32, dtype int32, useDynamic int32) { - chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) - chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) - C.atg_quantized_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst, cdtype, cuseDynamic) -} -func AtgQuantizedLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, dtype int32, useDynamic int32) { - chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) - chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - cuseDynamic := *(*C.int)(unsafe.Pointer(&useDynamic)) - C.atg_quantized_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cdtype, cuseDynamic) -} -func AtgQuantizedLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { - chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) - chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) - C.atg_quantized_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) -} -func AtgQuantizedMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) - C.atg_quantized_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, 
cdilationDataPtr, cdilationLen, cceilMode) -} -func AtgQuantizedRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { - C.atg_quantized_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) -} -func AtgQuantizedRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar) { - C.atg_quantized_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) -} -func AtgRand(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_rand(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandLike(ptr *Ctensor, self Ctensor) { - C.atg_rand_like(ptr, self) -} -func AtgRandOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_rand_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgRandint(ptr *Ctensor, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - chigh := *(*C.int64_t)(unsafe.Pointer(&high)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_randint(ptr, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandint1(ptr *Ctensor, low int64, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - clow := *(*C.int64_t)(unsafe.Pointer(&low)) - chigh := *(*C.int64_t)(unsafe.Pointer(&high)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_randint1(ptr, clow, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandintLike(ptr *Ctensor, self Ctensor, high int64) { - chigh := *(*C.int64_t)(unsafe.Pointer(&high)) - C.atg_randint_like(ptr, self, chigh) -} -func AtgRandintLike1(ptr *Ctensor, self Ctensor, low int64, high int64) { - clow := *(*C.int64_t)(unsafe.Pointer(&low)) - chigh := *(*C.int64_t)(unsafe.Pointer(&high)) - C.atg_randint_like1(ptr, self, clow, chigh) -} -func AtgRandintOut(ptr *Ctensor, out Ctensor, high int64, sizeData []int64, sizeLen int) { - chigh := *(*C.int64_t)(unsafe.Pointer(&high)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_randint_out(ptr, out, chigh, csizeDataPtr, csizeLen) -} -func AtgRandintOut1(ptr *Ctensor, out Ctensor, low int64, high int64, sizeData []int64, sizeLen int) { - clow := *(*C.int64_t)(unsafe.Pointer(&low)) - chigh := 
*(*C.int64_t)(unsafe.Pointer(&high)) - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_randint_out1(ptr, out, clow, chigh, csizeDataPtr, csizeLen) -} -func AtgRandn(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_randn(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgRandnLike(ptr *Ctensor, self Ctensor) { - C.atg_randn_like(ptr, self) -} -func AtgRandnOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_randn_out(ptr, out, csizeDataPtr, csizeLen) -} -func AtgRandom_(ptr *Ctensor, self Ctensor) { - C.atg_random_(ptr, self) -} -func AtgRandom1_(ptr *Ctensor, self Ctensor, to int64) { - cto := *(*C.int64_t)(unsafe.Pointer(&to)) - C.atg_random_1(ptr, self, cto) -} -func AtgRandom2(ptr *Ctensor, self Ctensor, from int64, to int64) { - cfrom := *(*C.int64_t)(unsafe.Pointer(&from)) - cto := *(*C.int64_t)(unsafe.Pointer(&to)) - C.atg_random_2(ptr, self, cfrom, cto) -} -func AtgRandperm(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_randperm(ptr, cn, coptionsKind, coptionsDevice) -} -func AtgRandpermOut(ptr *Ctensor, out Ctensor, n int64) { - cn := *(*C.int64_t)(unsafe.Pointer(&n)) - C.atg_randperm_out(ptr, out, cn) -} -func AtgRange(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_range(ptr, start, end, coptionsKind, coptionsDevice) -} -func AtgRange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_range1(ptr, start, end, coptionsKind, coptionsDevice) -} -func AtgRangeOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar) { - C.atg_range_out(ptr, out, start, end) -} -func AtgReal(ptr *Ctensor, self Ctensor) { - C.atg_real(ptr, self) -} -func AtgReciprocal(ptr *Ctensor, self Ctensor) { - C.atg_reciprocal(ptr, self) -} -func AtgReciprocal_(ptr *Ctensor, self Ctensor) { - C.atg_reciprocal_(ptr, self) -} -func AtgReciprocalOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_reciprocal_out(ptr, out, self) -} -func AtgReflectionPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, 
self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReflectionPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_reflection_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgRelu(ptr *Ctensor, self Ctensor) { - C.atg_relu(ptr, self) -} -func AtgRelu_(ptr *Ctensor, self Ctensor) { - C.atg_relu_(ptr, self) -} -func AtgRemainder(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_remainder(ptr, self, other) -} -func AtgRemainder1(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_remainder1(ptr, self, other) -} -func AtgRemainder_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_remainder_(ptr, self, other) -} -func AtgRemainder1_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_remainder_1(ptr, self, other) -} -func AtgRemainderOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar) { - C.atg_remainder_out(ptr, out, self, other) -} -func AtgRemainderOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_remainder_out1(ptr, out, self, other) -} -func AtgRenorm(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_renorm(ptr, self, p, cdim, maxnorm) -} -func AtgRenorm_(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_renorm_(ptr, self, p, cdim, maxnorm) -} -func AtgRenormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_renorm_out(ptr, out, self, p, cdim, maxnorm) -} -func AtgRepeat(ptr *Ctensor, self Ctensor, repeatsData []int64, repeatsLen int) { - crepeatsDataPtr := (*C.int64_t)(unsafe.Pointer(&repeatsData[0])) - crepeatsLen := *(*C.int)(unsafe.Pointer(&repeatsLen)) - C.atg_repeat(ptr, self, crepeatsDataPtr, crepeatsLen) -} -func 
AtgRepeatInterleave(ptr *Ctensor, repeats Ctensor) { - C.atg_repeat_interleave(ptr, repeats) -} -func AtgRepeatInterleave1(ptr *Ctensor, self Ctensor, repeats Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_repeat_interleave1(ptr, self, repeats, cdim) -} -func AtgRepeatInterleave2(ptr *Ctensor, self Ctensor, repeats int64, dim int64) { - crepeats := *(*C.int64_t)(unsafe.Pointer(&repeats)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_repeat_interleave2(ptr, self, crepeats, cdim) -} -func AtgReplicationPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad3d(ptr, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - 
cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad3d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad3d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) -} -func AtgReplicationPad3dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int) { - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_replication_pad3d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) -} -func AtgRequiresGrad_(ptr *Ctensor, self Ctensor, requiresGrad int32) { - crequiresGrad := *(*C.int)(unsafe.Pointer(&requiresGrad)) - C.atg_requires_grad_(ptr, self, crequiresGrad) -} -func AtgReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int) { - cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0])) - cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen)) - C.atg_reshape(ptr, self, cshapeDataPtr, cshapeLen) -} -func AtgReshapeAs(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_reshape_as(ptr, self, other) -} -func AtgResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_resize_(ptr, self, csizeDataPtr, csizeLen) -} -func AtgResizeAs_(ptr *Ctensor, self Ctensor, theTemplate Ctensor) { - C.atg_resize_as_(ptr, self, theTemplate) -} -func AtgRfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32) { - csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) - cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) - conesided := *(*C.int)(unsafe.Pointer(&onesided)) - C.atg_rfft(ptr, self, csignalNdim, cnormalized, conesided) -} -func AtgRnnRelu(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - C.atg_rnn_relu(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgRnnRelu1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - C.atg_rnn_relu1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, 
chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) { - C.atg_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh) -} -func AtgRnnTanh(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) - C.atg_rnn_tanh(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst) -} -func AtgRnnTanh1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32) { - cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0])) - cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen)) - chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases)) - cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) - cdropout := *(*C.double)(unsafe.Pointer(&dropout)) - ctrain := *(*C.int)(unsafe.Pointer(&train)) - cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) - C.atg_rnn_tanh1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional) -} -func AtgRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor) { - C.atg_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh) -} -func AtgRoll(ptr *Ctensor, self Ctensor, shiftsData []int64, shiftsLen int, dimsData []int64, dimsLen int) { - cshiftsDataPtr := (*C.int64_t)(unsafe.Pointer(&shiftsData[0])) - cshiftsLen := *(*C.int)(unsafe.Pointer(&shiftsLen)) - cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) - cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) - C.atg_roll(ptr, self, cshiftsDataPtr, cshiftsLen, cdimsDataPtr, cdimsLen) -} -func AtgRot90(ptr *Ctensor, self Ctensor, k int64, dimsData []int64, dimsLen int) { - ck := *(*C.int64_t)(unsafe.Pointer(&k)) - cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) - cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) - C.atg_rot90(ptr, self, ck, cdimsDataPtr, cdimsLen) -} -func AtgRound(ptr *Ctensor, self Ctensor) { - C.atg_round(ptr, self) -} -func AtgRound_(ptr *Ctensor, self Ctensor) { - C.atg_round_(ptr, self) -} -func AtgRoundOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_round_out(ptr, out, self) -} -func AtgRrelu(ptr *Ctensor, self Ctensor, training int32) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - C.atg_rrelu(ptr, self, ctraining) -} -func AtgRrelu_(ptr *Ctensor, self Ctensor, training int32) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - C.atg_rrelu_(ptr, self, ctraining) -} -func AtgRreluWithNoise(ptr *Ctensor, self Ctensor, noise Ctensor, training int32) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - C.atg_rrelu_with_noise(ptr, self, noise, ctraining) -} -func AtgRreluWithNoise_(ptr *Ctensor, self Ctensor, noise Ctensor, training int32) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - C.atg_rrelu_with_noise_(ptr, self, noise, 
ctraining) -} -func AtgRreluWithNoiseBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, noise Ctensor, lower Cscalar, upper Cscalar, training int32, selfIsResult int32) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) - C.atg_rrelu_with_noise_backward(ptr, gradOutput, self, noise, lower, upper, ctraining, cselfIsResult) -} -func AtgRreluWithNoiseOut(ptr *Ctensor, out Ctensor, self Ctensor, noise Ctensor, training int32) { - ctraining := *(*C.int)(unsafe.Pointer(&training)) - C.atg_rrelu_with_noise_out(ptr, out, self, noise, ctraining) -} -func AtgRsqrt(ptr *Ctensor, self Ctensor) { - C.atg_rsqrt(ptr, self) -} -func AtgRsqrt_(ptr *Ctensor, self Ctensor) { - C.atg_rsqrt_(ptr, self) -} -func AtgRsqrtOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_rsqrt_out(ptr, out, self) -} -func AtgRsub(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_rsub(ptr, self, other) -} -func AtgRsub1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_rsub1(ptr, self, other) -} -func AtgScalarTensor(ptr *Ctensor, s Cscalar, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_scalar_tensor(ptr, s, coptionsKind, coptionsDevice) -} -func AtgScatter(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_scatter(ptr, self, cdim, index, src) -} -func AtgScatter1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_scatter1(ptr, self, cdim, index, value) -} -func AtgScatter_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_scatter_(ptr, self, cdim, index, src) -} -func AtgScatter1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_scatter_1(ptr, self, cdim, index, value) -} -func AtgScatterAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_scatter_add(ptr, self, cdim, index, src) -} -func AtgScatterAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_scatter_add_(ptr, self, cdim, index, src) -} -func AtgSelect(ptr *Ctensor, self Ctensor, dim int64, index int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cindex := *(*C.int64_t)(unsafe.Pointer(&index)) - C.atg_select(ptr, self, cdim, cindex) -} -func AtgSelu(ptr *Ctensor, self Ctensor) { - C.atg_selu(ptr, self) -} -func AtgSelu_(ptr *Ctensor, self Ctensor) { - C.atg_selu_(ptr, self) -} -func AtgSet_(ptr *Ctensor, self Ctensor) { - C.atg_set_(ptr, self) -} -func AtgSet1_(ptr *Ctensor, self Ctensor, source Ctensor) { - C.atg_set_1(ptr, self, source) -} -func AtgSetRequiresGrad(ptr *Ctensor, self Ctensor, r int32) { - cr := *(*C.int)(unsafe.Pointer(&r)) - C.atg_set_requires_grad(ptr, self, cr) -} -func AtgSigmoid(ptr *Ctensor, self Ctensor) { - C.atg_sigmoid(ptr, self) -} -func AtgSigmoid_(ptr *Ctensor, self Ctensor) { - C.atg_sigmoid_(ptr, self) -} -func AtgSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor) { - C.atg_sigmoid_backward(ptr, gradOutput, output) -} -func AtgSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor) { - C.atg_sigmoid_backward_out(ptr, gradInput, gradOutput, output) -} 
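Note (editorial, not part of the generated diff): the newly generated wrappers in this hunk encode optional scalar arguments as a value-plus-flag pair, e.g. `AtgAvgPool2d`'s trailing `divisorOverrideVal int64, divisorOverrideNull int`. Below is a minimal sketch of how calling code in the same package might drive such a pair; the helper name is hypothetical and the flag semantics (non-zero meaning "argument not set") are an assumption for illustration, while the generated signature itself comes from this diff.

```go
// Hypothetical helper, assumed to live alongside the generated bindings where
// Ctensor and AtgAvgPool2d are defined. It maps a Go *int64 onto the
// value-plus-null-flag pair expected by the generated wrapper.
func avgPool2dWithOptionalDivisor(out *Ctensor, self Ctensor,
	kernel, stride, padding []int64,
	ceilMode, countIncludePad int32,
	divisor *int64) {

	divisorVal := int64(0)
	divisorNull := 1 // assumption: non-zero flag => "no divisor override"
	if divisor != nil {
		divisorVal = *divisor
		divisorNull = 0 // assumption: zero flag => use divisorVal
	}

	AtgAvgPool2d(out, self,
		kernel, len(kernel),
		stride, len(stride),
		padding, len(padding),
		ceilMode, countIncludePad,
		divisorVal, divisorNull)
}
```

The same calling pattern would presumably apply to the other `*Val`/`*Null` pairs generated in this file (for example the `AtgAvgPool3d*` family earlier in the hunk).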
-func AtgSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_sigmoid_out(ptr, out, self) -} -func AtgSign(ptr *Ctensor, self Ctensor) { - C.atg_sign(ptr, self) -} -func AtgSign_(ptr *Ctensor, self Ctensor) { - C.atg_sign_(ptr, self) -} -func AtgSignOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_sign_out(ptr, out, self) -} -func AtgSin(ptr *Ctensor, self Ctensor) { - C.atg_sin(ptr, self) -} -func AtgSin_(ptr *Ctensor, self Ctensor) { - C.atg_sin_(ptr, self) -} -func AtgSinOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_sin_out(ptr, out, self) -} -func AtgSinh(ptr *Ctensor, self Ctensor) { - C.atg_sinh(ptr, self) -} -func AtgSinh_(ptr *Ctensor, self Ctensor) { - C.atg_sinh_(ptr, self) -} -func AtgSinhOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_sinh_out(ptr, out, self) -} -func AtgSlice(ptr *Ctensor, self Ctensor, dim int64, start int64, end int64, step int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cstart := *(*C.int64_t)(unsafe.Pointer(&start)) - cend := *(*C.int64_t)(unsafe.Pointer(&end)) - cstep := *(*C.int64_t)(unsafe.Pointer(&step)) - C.atg_slice(ptr, self, cdim, cstart, cend, cstep) -} -func AtgSlogdet(ptr *Ctensor, self Ctensor) { - C.atg_slogdet(ptr, self) -} -func AtgSlowConv3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_slow_conv3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgSlowConv3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - C.atg_slow_conv3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) -} -func AtgSlowConvDilated2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_slow_conv_dilated2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, 
cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvDilated3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_slow_conv_dilated3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_slow_conv_transpose2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose2dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_slow_conv_transpose2d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen 
int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_slow_conv_transpose3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSlowConvTranspose3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int) { - ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) - ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) - cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) - cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) - cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) - cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) - coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) - coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) - cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) - cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) - C.atg_slow_conv_transpose3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) -} -func AtgSmm(ptr *Ctensor, self Ctensor, mat2 Ctensor) { - C.atg_smm(ptr, self, mat2) -} -func AtgSmoothL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_smooth_l1_loss(ptr, self, target, creduction) -} -func AtgSmoothL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_smooth_l1_loss_backward(ptr, gradOutput, self, target, creduction) -} -func AtgSmoothL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_smooth_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) -} -func AtgSmoothL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_smooth_l1_loss_out(ptr, out, self, target, creduction) -} -func AtgSoftMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_soft_margin_loss(ptr, self, target, creduction) -} -func AtgSoftMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := 
*(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_soft_margin_loss_backward(ptr, gradOutput, self, target, creduction) -} -func AtgSoftMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_soft_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) -} -func AtgSoftMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64) { - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_soft_margin_loss_out(ptr, out, self, target, creduction) -} -func AtgSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_softmax(ptr, self, cdim, cdtype) -} -func AtgSoftplus(ptr *Ctensor, self Ctensor) { - C.atg_softplus(ptr, self) -} -func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor) { - C.atg_softplus_backward(ptr, gradOutput, self, beta, threshold, output) -} -func AtgSoftplusBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor) { - C.atg_softplus_backward_out(ptr, gradInput, gradOutput, self, beta, threshold, output) -} -func AtgSoftplusOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_softplus_out(ptr, out, self) -} -func AtgSoftshrink(ptr *Ctensor, self Ctensor) { - C.atg_softshrink(ptr, self) -} -func AtgSoftshrinkBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar) { - C.atg_softshrink_backward(ptr, gradOutput, self, lambd) -} -func AtgSoftshrinkBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar) { - C.atg_softshrink_backward_out(ptr, gradInput, gradOutput, self, lambd) -} -func AtgSoftshrinkOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_softshrink_out(ptr, out, self) -} -func AtgSolve(ptr *Ctensor, self Ctensor, a Ctensor) { - C.atg_solve(ptr, self, a) -} -func AtgSolveOut(ptr *Ctensor, solution Ctensor, lu Ctensor, self Ctensor, a Ctensor) { - C.atg_solve_out(ptr, solution, lu, self, a) -} -func AtgSort(ptr *Ctensor, self Ctensor, dim int64, descending int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdescending := *(*C.int)(unsafe.Pointer(&descending)) - C.atg_sort(ptr, self, cdim, cdescending) -} -func AtgSortOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, descending int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - cdescending := *(*C.int)(unsafe.Pointer(&descending)) - C.atg_sort_out(ptr, values, indices, self, cdim, cdescending) -} -func AtgSparseCooTensor(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_sparse_coo_tensor(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgSparseCooTensor1(ptr *Ctensor, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_sparse_coo_tensor1(ptr, indices, values, coptionsKind, coptionsDevice) -} -func AtgSparseCooTensor2(ptr *Ctensor, indices Ctensor, values Ctensor, 
sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_sparse_coo_tensor2(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) -} -func AtgSparseMask(ptr *Ctensor, self Ctensor, mask Ctensor) { - C.atg_sparse_mask(ptr, self, mask) -} -func AtgSparseResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) - cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) - C.atg_sparse_resize_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) -} -func AtgSparseResizeAndClear_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) - cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) - C.atg_sparse_resize_and_clear_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) +func AtgBucketize(ptr *Ctensor, self Ctensor, boundaries Ctensor, outInt32 int32, right int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +cright := *(*C.int)(unsafe.Pointer(&right)) +C.atg_bucketize(ptr, self, boundaries, coutInt32, cright) +} +func AtgBucketize1(ptr *Ctensor, selfScalar Cscalar, boundaries Ctensor, outInt32 int32, right int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +cright := *(*C.int)(unsafe.Pointer(&right)) +C.atg_bucketize1(ptr, selfScalar , boundaries, coutInt32, cright) +} +func AtgBucketizeOut(ptr *Ctensor, out Ctensor, self Ctensor, boundaries Ctensor, outInt32 int32, right int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +cright := *(*C.int)(unsafe.Pointer(&right)) +C.atg_bucketize_out(ptr, out, self, boundaries, coutInt32, cright) +} +func AtgCartesianProd(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_cartesian_prod(ptr, ctensorsDataPtr, ctensorsLen) +} +func AtgCat(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cat(ptr, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgCatOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cat_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgCauchy_(ptr *Ctensor, self Ctensor, median float64, sigma float64){ +cmedian := *(*C.double)(unsafe.Pointer(&median)) +csigma := *(*C.double)(unsafe.Pointer(&sigma)) +C.atg_cauchy_(ptr, self, cmedian, csigma) +} +func AtgCdist(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, computeModeVal int64, computeModeNull int){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ccomputeModeVal := *(*C.int64_t)(unsafe.Pointer(&computeModeVal)) +ccomputeModeNull := 
*(*C.uint8_t)(unsafe.Pointer(&computeModeNull)) +C.atg_cdist(ptr, x1, x2, cp, ccomputeModeVal, ccomputeModeNull) +} +func AtgCeil(ptr *Ctensor, self Ctensor){ +C.atg_ceil(ptr, self) +} +func AtgCeil_(ptr *Ctensor, self Ctensor){ +C.atg_ceil_(ptr, self) +} +func AtgCeilOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_ceil_out(ptr, out, self) +} +func AtgCelu(ptr *Ctensor, self Ctensor){ +C.atg_celu(ptr, self) +} +func AtgCelu_(ptr *Ctensor, self Ctensor){ +C.atg_celu_(ptr, self) +} +func AtgChainMatmul(ptr *Ctensor, matricesData []Ctensor, matricesLen int){ +cmatricesDataPtr := (*Ctensor)(unsafe.Pointer(&matricesData[0])) +cmatricesLen := *(*C.int)(unsafe.Pointer(&matricesLen)) +C.atg_chain_matmul(ptr, cmatricesDataPtr, cmatricesLen) +} +func AtgChannelShuffle(ptr *Ctensor, self Ctensor, groups int64){ +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_channel_shuffle(ptr, self, cgroups) +} +func AtgCholesky(ptr *Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky(ptr, self, cupper) +} +func AtgCholeskyInverse(ptr *Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_inverse(ptr, self, cupper) +} +func AtgCholeskyInverseOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_inverse_out(ptr, out, self, cupper) +} +func AtgCholeskyOut(ptr *Ctensor, out Ctensor, self Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_out(ptr, out, self, cupper) +} +func AtgCholeskySolve(ptr *Ctensor, self Ctensor, input2 Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_solve(ptr, self, input2, cupper) +} +func AtgCholeskySolveOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, upper int32){ +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_cholesky_solve_out(ptr, out, self, input2, cupper) } -func AtgSqrt(ptr *Ctensor, self Ctensor) { - C.atg_sqrt(ptr, self) -} -func AtgSqrt_(ptr *Ctensor, self Ctensor) { - C.atg_sqrt_(ptr, self) -} -func AtgSqrtOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_sqrt_out(ptr, out, self) -} -func AtgSquare(ptr *Ctensor, self Ctensor) { - C.atg_square(ptr, self) -} -func AtgSquare_(ptr *Ctensor, self Ctensor) { - C.atg_square_(ptr, self) -} -func AtgSqueeze(ptr *Ctensor, self Ctensor) { - C.atg_squeeze(ptr, self) -} -func AtgSqueeze1(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_squeeze1(ptr, self, cdim) -} -func AtgSqueeze_(ptr *Ctensor, self Ctensor) { - C.atg_squeeze_(ptr, self) -} -func AtgSqueeze1_(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_squeeze_1(ptr, self, cdim) -} -func AtgSspaddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { - C.atg_sspaddmm(ptr, self, mat1, mat2) -} -func AtgSspaddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor) { - C.atg_sspaddmm_out(ptr, out, self, mat1, mat2) -} -func AtgStack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_stack(ptr, ctensorsDataPtr, ctensorsLen, cdim) -} -func AtgStackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64) { - ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) - ctensorsLen := 
*(*C.int)(unsafe.Pointer(&tensorsLen)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_stack_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) -} -func AtgStd(ptr *Ctensor, self Ctensor, unbiased int32) { - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - C.atg_std(ptr, self, cunbiased) -} -func AtgStd1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_std1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) -} -func AtgStdMean(ptr *Ctensor, self Ctensor, unbiased int32) { - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - C.atg_std_mean(ptr, self, cunbiased) -} -func AtgStdMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_std_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) -} -func AtgStdOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_std_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) -} -func AtgStft(ptr *Ctensor, self Ctensor, nFft int64, hopLength int64, winLength int64, window Ctensor, normalized int32, onesided int32) { - cnFft := *(*C.int64_t)(unsafe.Pointer(&nFft)) - chopLength := *(*C.int64_t)(unsafe.Pointer(&hopLength)) - cwinLength := *(*C.int64_t)(unsafe.Pointer(&winLength)) - cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) - conesided := *(*C.int)(unsafe.Pointer(&onesided)) - C.atg_stft(ptr, self, cnFft, chopLength, cwinLength, window, cnormalized, conesided) -} -func AtgSub(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_sub(ptr, self, other) -} -func AtgSub1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_sub1(ptr, self, other) -} -func AtgSub_(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_sub_(ptr, self, other) -} -func AtgSub1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_sub_1(ptr, self, other) -} -func AtgSubOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_sub_out(ptr, out, self, other) -} -func AtgSum(ptr *Ctensor, self Ctensor, dtype int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_sum(ptr, self, cdtype) -} -func AtgSum1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_sum1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgSumOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - C.atg_sum_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) -} -func AtgSumToSize(ptr 
*Ctensor, self Ctensor, sizeData []int64, sizeLen int) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_sum_to_size(ptr, self, csizeDataPtr, csizeLen) -} -func AtgSvd(ptr *Ctensor, self Ctensor, some int32, computeUv int32) { - csome := *(*C.int)(unsafe.Pointer(&some)) - ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) - C.atg_svd(ptr, self, csome, ccomputeUv) -} -func AtgSvdOut(ptr *Ctensor, u Ctensor, s Ctensor, v Ctensor, self Ctensor, some int32, computeUv int32) { - csome := *(*C.int)(unsafe.Pointer(&some)) - ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) - C.atg_svd_out(ptr, u, s, v, self, csome, ccomputeUv) -} -func AtgSymeig(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32) { - ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_symeig(ptr, self, ceigenvectors, cupper) -} -func AtgSymeigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32, upper int32) { - ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) - cupper := *(*C.int)(unsafe.Pointer(&upper)) - C.atg_symeig_out(ptr, e, v, self, ceigenvectors, cupper) -} -func AtgT(ptr *Ctensor, self Ctensor) { - C.atg_t(ptr, self) -} -func AtgT_(ptr *Ctensor, self Ctensor) { - C.atg_t_(ptr, self) -} -func AtgTake(ptr *Ctensor, self Ctensor, index Ctensor) { - C.atg_take(ptr, self, index) -} -func AtgTakeOut(ptr *Ctensor, out Ctensor, self Ctensor, index Ctensor) { - C.atg_take_out(ptr, out, self, index) -} -func AtgTan(ptr *Ctensor, self Ctensor) { - C.atg_tan(ptr, self) -} -func AtgTan_(ptr *Ctensor, self Ctensor) { - C.atg_tan_(ptr, self) -} -func AtgTanOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_tan_out(ptr, out, self) -} -func AtgTanh(ptr *Ctensor, self Ctensor) { - C.atg_tanh(ptr, self) -} -func AtgTanh_(ptr *Ctensor, self Ctensor) { - C.atg_tanh_(ptr, self) -} -func AtgTanhBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor) { - C.atg_tanh_backward(ptr, gradOutput, output) -} -func AtgTanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor) { - C.atg_tanh_backward_out(ptr, gradInput, gradOutput, output) -} -func AtgTanhOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_tanh_out(ptr, out, self) -} -func AtgTensordot(ptr *Ctensor, self Ctensor, other Ctensor, dimsSelfData []int64, dimsSelfLen int, dimsOtherData []int64, dimsOtherLen int) { - cdimsSelfDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsSelfData[0])) - cdimsSelfLen := *(*C.int)(unsafe.Pointer(&dimsSelfLen)) - cdimsOtherDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsOtherData[0])) - cdimsOtherLen := *(*C.int)(unsafe.Pointer(&dimsOtherLen)) - C.atg_tensordot(ptr, self, other, cdimsSelfDataPtr, cdimsSelfLen, cdimsOtherDataPtr, cdimsOtherLen) -} -func AtgThreshold(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar) { - C.atg_threshold(ptr, self, threshold, value) -} -func AtgThreshold_(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar) { - C.atg_threshold_(ptr, self, threshold, value) -} -func AtgThresholdBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, threshold Cscalar) { - C.atg_threshold_backward(ptr, gradOutput, self, threshold) -} -func AtgThresholdOut(ptr *Ctensor, out Ctensor, self Ctensor, threshold Cscalar, value Cscalar) { - C.atg_threshold_out(ptr, out, self, threshold, value) -} -func AtgTo(ptr *Ctensor, self Ctensor, device int32) { - cdevice := *(*C.int)(unsafe.Pointer(&device)) - C.atg_to(ptr, self, cdevice) -} -func 
AtgTo1(ptr *Ctensor, self Ctensor, optionsKind int32, optionsDevice int32, nonBlocking int32, copy int32) { - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - ccopy := *(*C.int)(unsafe.Pointer(&copy)) - C.atg_to1(ptr, self, coptionsKind, coptionsDevice, cnonBlocking, ccopy) -} -func AtgTo2(ptr *Ctensor, self Ctensor, dtype int32, nonBlocking int32, copy int32) { - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - ccopy := *(*C.int)(unsafe.Pointer(&copy)) - C.atg_to2(ptr, self, cdtype, cnonBlocking, ccopy) -} -func AtgTo3(ptr *Ctensor, self Ctensor, other Ctensor, nonBlocking int32, copy int32) { - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - ccopy := *(*C.int)(unsafe.Pointer(&copy)) - C.atg_to3(ptr, self, other, cnonBlocking, ccopy) -} -func AtgTo4(ptr *Ctensor, self Ctensor, device int32, dtype int32, nonBlocking int32, copy int32) { - cdevice := *(*C.int)(unsafe.Pointer(&device)) - cdtype := *(*C.int)(unsafe.Pointer(&dtype)) - cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) - ccopy := *(*C.int)(unsafe.Pointer(&copy)) - C.atg_to4(ptr, self, cdevice, cdtype, cnonBlocking, ccopy) -} -func AtgToDense(ptr *Ctensor, self Ctensor) { - C.atg_to_dense(ptr, self) -} -func AtgToDenseBackward(ptr *Ctensor, grad Ctensor, input Ctensor) { - C.atg_to_dense_backward(ptr, grad, input) -} -func AtgToMkldnn(ptr *Ctensor, self Ctensor) { - C.atg_to_mkldnn(ptr, self) -} -func AtgToMkldnnBackward(ptr *Ctensor, grad Ctensor, input Ctensor) { - C.atg_to_mkldnn_backward(ptr, grad, input) -} -func AtgToSparse(ptr *Ctensor, self Ctensor) { - C.atg_to_sparse(ptr, self) -} -func AtgToSparse1(ptr *Ctensor, self Ctensor, sparseDim int64) { - csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) - C.atg_to_sparse1(ptr, self, csparseDim) -} -func AtgTopk(ptr *Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32) { - ck := *(*C.int64_t)(unsafe.Pointer(&k)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - clargest := *(*C.int)(unsafe.Pointer(&largest)) - csorted := *(*C.int)(unsafe.Pointer(&sorted)) - C.atg_topk(ptr, self, ck, cdim, clargest, csorted) -} -func AtgTopkOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32) { - ck := *(*C.int64_t)(unsafe.Pointer(&k)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - clargest := *(*C.int)(unsafe.Pointer(&largest)) - csorted := *(*C.int)(unsafe.Pointer(&sorted)) - C.atg_topk_out(ptr, values, indices, self, ck, cdim, clargest, csorted) -} -func AtgTotype(ptr *Ctensor, self Ctensor, scalarType int32) { - cscalarType := *(*C.int)(unsafe.Pointer(&scalarType)) - C.atg_totype(ptr, self, cscalarType) -} -func AtgTrace(ptr *Ctensor, self Ctensor) { - C.atg_trace(ptr, self) -} -func AtgTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { - cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) - cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) - C.atg_transpose(ptr, self, cdim0, cdim1) -} -func AtgTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64) { - cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0)) - cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) - C.atg_transpose_(ptr, self, cdim0, cdim1) -} -func AtgTrapz(ptr *Ctensor, y Ctensor, x Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_trapz(ptr, y, x, cdim) -} -func AtgTrapz1(ptr *Ctensor, y Ctensor, dx float64, dim int64) { - cdx :=
*(*C.double)(unsafe.Pointer(&dx)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_trapz1(ptr, y, cdx, cdim) -} -func AtgTriangularSolve(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) - cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) - C.atg_triangular_solve(ptr, self, a, cupper, ctranspose, cunitriangular) -} -func AtgTriangularSolveOut(ptr *Ctensor, x Ctensor, m Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32) { - cupper := *(*C.int)(unsafe.Pointer(&upper)) - ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) - cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular)) - C.atg_triangular_solve_out(ptr, x, m, self, a, cupper, ctranspose, cunitriangular) -} -func AtgTril(ptr *Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_tril(ptr, self, cdiagonal) -} -func AtgTril_(ptr *Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_tril_(ptr, self, cdiagonal) -} -func AtgTrilIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32) { - crow := *(*C.int64_t)(unsafe.Pointer(&row)) - ccol := *(*C.int64_t)(unsafe.Pointer(&col)) - coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_tril_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) -} -func AtgTrilOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_tril_out(ptr, out, self, cdiagonal) -} -func AtgTripletMarginLoss(ptr *Ctensor, anchor Ctensor, positive Ctensor, negative Ctensor, margin float64, p float64, eps float64, swap int32, reduction int64) { - cmargin := *(*C.double)(unsafe.Pointer(&margin)) - cp := *(*C.double)(unsafe.Pointer(&p)) - ceps := *(*C.double)(unsafe.Pointer(&eps)) - cswap := *(*C.int)(unsafe.Pointer(&swap)) - creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) - C.atg_triplet_margin_loss(ptr, anchor, positive, negative, cmargin, cp, ceps, cswap, creduction) -} -func AtgTriu(ptr *Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_triu(ptr, self, cdiagonal) -} -func AtgTriu_(ptr *Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_triu_(ptr, self, cdiagonal) -} -func AtgTriuIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32) { - crow := *(*C.int64_t)(unsafe.Pointer(&row)) - ccol := *(*C.int64_t)(unsafe.Pointer(&col)) - coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) - coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) - coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) - C.atg_triu_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) -} -func AtgTriuOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64) { - cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) - C.atg_triu_out(ptr, out, self, cdiagonal) -} -func AtgTrueDivide(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_true_divide(ptr, self, other) -} -func AtgTrueDivide1(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_true_divide1(ptr, self, other) -} -func AtgTrueDivide_(ptr *Ctensor, self Ctensor, other Ctensor) { - 
C.atg_true_divide_(ptr, self, other) -} -func AtgTrueDivide1_(ptr *Ctensor, self Ctensor, other Cscalar) { - C.atg_true_divide_1(ptr, self, other) -} -func AtgTrueDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor) { - C.atg_true_divide_out(ptr, out, self, other) -} -func AtgTrunc(ptr *Ctensor, self Ctensor) { - C.atg_trunc(ptr, self) -} -func AtgTrunc_(ptr *Ctensor, self Ctensor) { - C.atg_trunc_(ptr, self) -} -func AtgTruncOut(ptr *Ctensor, out Ctensor, self Ctensor) { - C.atg_trunc_out(ptr, out, self) -} -func AtgTypeAs(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_type_as(ptr, self, other) +func AtgClamp(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clamp(ptr, self, min , max ) +} +func AtgClamp_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clamp_(ptr, self, min , max ) +} +func AtgClampMax(ptr *Ctensor, self Ctensor, max Cscalar){ +C.atg_clamp_max(ptr, self, max ) +} +func AtgClampMax_(ptr *Ctensor, self Ctensor, max Cscalar){ +C.atg_clamp_max_(ptr, self, max ) +} +func AtgClampMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, max Cscalar){ +C.atg_clamp_max_out(ptr, out, self, max ) +} +func AtgClampMin(ptr *Ctensor, self Ctensor, min Cscalar){ +C.atg_clamp_min(ptr, self, min ) +} +func AtgClampMin_(ptr *Ctensor, self Ctensor, min Cscalar){ +C.atg_clamp_min_(ptr, self, min ) +} +func AtgClampMinOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar){ +C.atg_clamp_min_out(ptr, out, self, min ) +} +func AtgClampOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clamp_out(ptr, out, self, min , max ) +} +func AtgClip(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clip(ptr, self, min , max ) +} +func AtgClip_(ptr *Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clip_(ptr, self, min , max ) +} +func AtgClipOut(ptr *Ctensor, out Ctensor, self Ctensor, min Cscalar, max Cscalar){ +C.atg_clip_out(ptr, out, self, min , max ) +} +func AtgCoalesce(ptr *Ctensor, self Ctensor){ +C.atg_coalesce(ptr, self) +} +func AtgCol2im(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im(ptr, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imBackward(ptr *Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := 
*(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im_backward(ptr, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im_backward_out(ptr, gradInput, gradOutput, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCol2imOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_col2im_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgCombinations(ptr *Ctensor, self Ctensor, r int64, withReplacement int32){ +cr := *(*C.int64_t)(unsafe.Pointer(&r)) +cwithReplacement := *(*C.int)(unsafe.Pointer(&withReplacement)) +C.atg_combinations(ptr, self, cr, cwithReplacement) +} +func AtgComplex(ptr *Ctensor, real Ctensor, imag Ctensor){ +C.atg_complex(ptr, real, imag) +} +func AtgComplexOut(ptr *Ctensor, out Ctensor, real Ctensor, imag Ctensor){ +C.atg_complex_out(ptr, out, real, imag) +} +func AtgConj(ptr *Ctensor, self Ctensor){ +C.atg_conj(ptr, self) +} +func AtgConjOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_conj_out(ptr, out, self) +} +func AtgConstantPadNd(ptr *Ctensor, self Ctensor, padData []int64, padLen int){ +cpadDataPtr := (*C.int64_t)(unsafe.Pointer(&padData[0])) +cpadLen := *(*C.int)(unsafe.Pointer(&padLen)) +C.atg_constant_pad_nd(ptr, self, cpadDataPtr, cpadLen) +} +func AtgContiguous(ptr *Ctensor, self Ctensor){ +C.atg_contiguous(ptr, self) +} +func AtgConv1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, 
dilationData []int64, dilationLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_conv1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConv2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_conv2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConv3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_conv3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgConvTbc(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, pad int64){ +cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) +C.atg_conv_tbc(ptr, self, weight, bias, cpad) +} +func AtgConvTbcBackward(ptr *Ctensor, self Ctensor, input Ctensor, weight Ctensor, bias Ctensor, pad int64){ +cpad := *(*C.int64_t)(unsafe.Pointer(&pad)) +C.atg_conv_tbc_backward(ptr, self, input, weight, bias, cpad) +} +func AtgConvTranspose1d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_conv_transpose1d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func 
AtgConvTranspose2d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_conv_transpose2d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvTranspose3d(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, groups int64, dilationData []int64, dilationLen int){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_conv_transpose3d(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cgroups, cdilationDataPtr, cdilationLen) +} +func AtgConvolution(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_convolution(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) +} +func AtgConvolutionOverrideable(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, transposed int32, outputPaddingData []int64, outputPaddingLen int, groups int64){ +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +ctransposed := *(*C.int)(unsafe.Pointer(&transposed)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_convolution_overrideable(ptr, input, weight, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, ctransposed, coutputPaddingDataPtr, coutputPaddingLen, cgroups) +} +func AtgCopySparseToSparse_(ptr *Ctensor, self Ctensor, src Ctensor, nonBlocking int32){ +cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking)) +C.atg_copy_sparse_to_sparse_(ptr, self, src, cnonBlocking) +} +func AtgCos(ptr *Ctensor, self Ctensor){ +C.atg_cos(ptr, self) +} +func AtgCos_(ptr *Ctensor, self Ctensor){ +C.atg_cos_(ptr, self) +} +func AtgCosOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_cos_out(ptr, out, self) +} +func AtgCosh(ptr *Ctensor, self Ctensor){ +C.atg_cosh(ptr, self) +} +func AtgCosh_(ptr *Ctensor, self Ctensor){ +C.atg_cosh_(ptr, self) +} +func AtgCoshOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_cosh_out(ptr, out, self) +} +func AtgCosineEmbeddingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_cosine_embedding_loss(ptr, input1, input2, target, cmargin, creduction) +} +func AtgCosineSimilarity(ptr *Ctensor, x1 Ctensor, x2 Ctensor, dim int64, eps float64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_cosine_similarity(ptr, x1, x2, cdim, ceps) +} +func AtgCountNonzero(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +C.atg_count_nonzero(ptr, self, cdimDataPtr, cdimLen) +} +func AtgCountNonzero1(ptr *Ctensor, self Ctensor, dimVal int64, dimNull int){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +C.atg_count_nonzero1(ptr, self, cdimVal, cdimNull) +} +func AtgCross(ptr *Ctensor, self Ctensor, other Ctensor, dimVal int64, dimNull int){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +C.atg_cross(ptr, self, other, cdimVal, cdimNull) +} +func AtgCrossOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor, dimVal int64, dimNull int){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +C.atg_cross_out(ptr, out, self, other, cdimVal, cdimNull) +} +func AtgCtcLoss(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengthsData []int64, inputLengthsLen int, targetLengthsData []int64, targetLengthsLen int, blank int64, reduction int64, zeroInfinity int32){ +cinputLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&inputLengthsData[0])) +cinputLengthsLen := *(*C.int)(unsafe.Pointer(&inputLengthsLen)) +ctargetLengthsDataPtr := (*C.int64_t)(unsafe.Pointer(&targetLengthsData[0])) +ctargetLengthsLen := *(*C.int)(unsafe.Pointer(&targetLengthsLen)) +cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +czeroInfinity := 
*(*C.int)(unsafe.Pointer(&zeroInfinity)) +C.atg_ctc_loss(ptr, logProbs, targets, cinputLengthsDataPtr, cinputLengthsLen, ctargetLengthsDataPtr, ctargetLengthsLen, cblank, creduction, czeroInfinity) +} +func AtgCtcLoss1(ptr *Ctensor, logProbs Ctensor, targets Ctensor, inputLengths Ctensor, targetLengths Ctensor, blank int64, reduction int64, zeroInfinity int32){ +cblank := *(*C.int64_t)(unsafe.Pointer(&blank)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +czeroInfinity := *(*C.int)(unsafe.Pointer(&zeroInfinity)) +C.atg_ctc_loss1(ptr, logProbs, targets, inputLengths, targetLengths, cblank, creduction, czeroInfinity) +} +func AtgCudnnAffineGridGenerator(ptr *Ctensor, theta Ctensor, n int64, c int64, h int64, w int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cc := *(*C.int64_t)(unsafe.Pointer(&c)) +ch := *(*C.int64_t)(unsafe.Pointer(&h)) +cw := *(*C.int64_t)(unsafe.Pointer(&w)) +C.atg_cudnn_affine_grid_generator(ptr, theta, cn, cc, ch, cw) +} +func AtgCudnnAffineGridGeneratorBackward(ptr *Ctensor, grad Ctensor, n int64, c int64, h int64, w int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cc := *(*C.int64_t)(unsafe.Pointer(&c)) +ch := *(*C.int64_t)(unsafe.Pointer(&h)) +cw := *(*C.int64_t)(unsafe.Pointer(&w)) +C.atg_cudnn_affine_grid_generator_backward(ptr, grad, cn, cc, ch, cw) +} +func AtgCudnnBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor)) +cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) +C.atg_cudnn_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon) +} +func AtgCudnnBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64, reserveSpace Ctensor){ +cepsilon := *(*C.double)(unsafe.Pointer(&epsilon)) +C.atg_cudnn_batch_norm_backward(ptr, input, gradOutput, weight, runningMean, runningVar, saveMean, saveVar, cepsilon, reserveSpace) +} +func AtgCudnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolution1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolution2(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) +C.atg_cudnn_convolution2(ptr, self, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) +} +func AtgCudnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) +C.atg_cudnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) +} +func AtgCudnnConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := 
(*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) +C.atg_cudnn_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) +} +func AtgCudnnConvolutionTranspose(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution_transpose(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTranspose1(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_cudnn_convolution_transpose1(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgCudnnConvolutionTranspose2(ptr *Ctensor, self Ctensor, weight Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) 
+cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) +C.atg_cudnn_convolution_transpose2(ptr, self, weight, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) +} +func AtgCudnnConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) +C.atg_cudnn_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) +} +func AtgCudnnConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32, allowTf32 int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +callowTf32 := *(*C.int)(unsafe.Pointer(&allowTf32)) +C.atg_cudnn_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic, callowTf32) +} +func AtgCudnnGridSampler(ptr *Ctensor, self Ctensor, grid Ctensor){ +C.atg_cudnn_grid_sampler(ptr, self, grid) +} +func AtgCudnnGridSamplerBackward(ptr *Ctensor, self Ctensor, grid Ctensor, gradOutput Ctensor){ +C.atg_cudnn_grid_sampler_backward(ptr, self, grid, gradOutput) +} +func AtgCummax(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummax(ptr, self, cdim) +} +func AtgCummaxOut(ptr *Ctensor, values Ctensor, indices Ctensor, self 
Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummax_out(ptr, values, indices, self, cdim) +} +func AtgCummaxminBackward(ptr *Ctensor, grad Ctensor, input Ctensor, indices Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummaxmin_backward(ptr, grad, input, indices, cdim) +} +func AtgCummin(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummin(ptr, self, cdim) +} +func AtgCumminOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cummin_out(ptr, values, indices, self, cdim) +} +func AtgCumprod(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumprod(ptr, self, cdim, cdtype) +} +func AtgCumprodBackward(ptr *Ctensor, grad Ctensor, input Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_cumprod_backward(ptr, grad, input, cdim) +} +func AtgCumprodOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumprod_out(ptr, out, self, cdim, cdtype) +} +func AtgCumsum(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumsum(ptr, self, cdim, cdtype) +} +func AtgCumsumOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_cumsum_out(ptr, out, self, cdim, cdtype) +} +func AtgData(ptr *Ctensor, self Ctensor){ +C.atg_data(ptr, self) +} +func AtgDeg2rad(ptr *Ctensor, self Ctensor){ +C.atg_deg2rad(ptr, self) +} +func AtgDeg2rad_(ptr *Ctensor, self Ctensor){ +C.atg_deg2rad_(ptr, self) +} +func AtgDeg2radOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_deg2rad_out(ptr, out, self) +} +func AtgDequantize(ptr *Ctensor, self Ctensor){ +C.atg_dequantize(ptr, self) } -func AtgUnfold(ptr *Ctensor, self Ctensor, dimension int64, size int64, step int64) { - cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) - csize := *(*C.int64_t)(unsafe.Pointer(&size)) - cstep := *(*C.int64_t)(unsafe.Pointer(&step)) - C.atg_unfold(ptr, self, cdimension, csize, cstep) +func AtgDet(ptr *Ctensor, self Ctensor){ +C.atg_det(ptr, self) } -func AtgUniform_(ptr *Ctensor, self Ctensor, from float64, to float64) { - cfrom := *(*C.double)(unsafe.Pointer(&from)) - cto := *(*C.double)(unsafe.Pointer(&to)) - C.atg_uniform_(ptr, self, cfrom, cto) +func AtgDetach(ptr *Ctensor, self Ctensor){ +C.atg_detach(ptr, self) } -func AtgUniqueConsecutive(ptr *Ctensor, self Ctensor, returnInverse int32, returnCounts int32, dim int64) { - creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) - creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_unique_consecutive(ptr, self, creturnInverse, creturnCounts, cdim) +func AtgDetach_(ptr *Ctensor, self Ctensor){ +C.atg_detach_(ptr, self) } -func AtgUniqueDim(ptr *Ctensor, self Ctensor, dim int64, sorted int32, returnInverse int32, returnCounts int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - csorted := *(*C.int)(unsafe.Pointer(&sorted)) - creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) - creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) - C.atg_unique_dim(ptr, self, cdim, csorted, creturnInverse, 
creturnCounts) +func AtgDiag(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_diag(ptr, self, cdiagonal) } -func AtgUniqueDimConsecutive(ptr *Ctensor, self Ctensor, dim int64, returnInverse int32, returnCounts int32) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) - creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) - C.atg_unique_dim_consecutive(ptr, self, cdim, creturnInverse, creturnCounts) +func AtgDiagBackward(ptr *Ctensor, grad Ctensor, inputSizesData []int64, inputSizesLen int, diagonal int64){ +cinputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizesData[0])) +cinputSizesLen := *(*C.int)(unsafe.Pointer(&inputSizesLen)) +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_diag_backward(ptr, grad, cinputSizesDataPtr, cinputSizesLen, cdiagonal) } -func AtgUnsqueeze(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_unsqueeze(ptr, self, cdim) +func AtgDiagEmbed(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) +C.atg_diag_embed(ptr, self, coffset, cdim1, cdim2) } -func AtgUnsqueeze_(ptr *Ctensor, self Ctensor, dim int64) { - cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) - C.atg_unsqueeze_(ptr, self, cdim) +func AtgDiagOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_diag_out(ptr, out, self, cdiagonal) } -func AtgUpsampleBicubic2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bicubic2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDiagflat(ptr *Ctensor, self Ctensor, offset int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +C.atg_diagflat(ptr, self, coffset) } -func AtgUpsampleBicubic2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bicubic2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDiagonal(ptr *Ctensor, self Ctensor, offset int64, dim1 int64, dim2 int64){ +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) +C.atg_diagonal(ptr, self, coffset, cdim1, cdim2) } -func AtgUpsampleBicubic2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData 
[]int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bicubic2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDiagonalBackward(ptr *Ctensor, grad Ctensor, inputSizesData []int64, inputSizesLen int, offset int64, dim1 int64, dim2 int64){ +cinputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizesData[0])) +cinputSizesLen := *(*C.int)(unsafe.Pointer(&inputSizesLen)) +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1)) +cdim2 := *(*C.int64_t)(unsafe.Pointer(&dim2)) +C.atg_diagonal_backward(ptr, grad, cinputSizesDataPtr, cinputSizesLen, coffset, cdim1, cdim2) } -func AtgUpsampleBicubic2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bicubic2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDigamma(ptr *Ctensor, self Ctensor){ +C.atg_digamma(ptr, self) } -func AtgUpsampleBilinear2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bilinear2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDigamma_(ptr *Ctensor, self Ctensor){ +C.atg_digamma_(ptr, self) } -func AtgUpsampleBilinear2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bilinear2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDigammaOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_digamma_out(ptr, out, self) } -func AtgUpsampleBilinear2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData 
[]int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bilinear2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDist(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_dist(ptr, self, other) } -func AtgUpsampleBilinear2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_bilinear2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesH, cscalesW) +func AtgDiv(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_div(ptr, self, other) } -func AtgUpsampleLinear1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_linear1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) +func AtgDiv1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_div1(ptr, self, other ) } -func AtgUpsampleLinear1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_linear1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) +func AtgDiv_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_div_(ptr, self, other) } -func AtgUpsampleLinear1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_linear1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, 
coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscales) +func AtgDiv1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_div_1(ptr, self, other ) } -func AtgUpsampleLinear1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_linear1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscales) +func AtgDivOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_div_out(ptr, out, self, other) } -func AtgUpsampleNearest1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_nearest1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscales) +func AtgDivide(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_divide(ptr, self, other) } -func AtgUpsampleNearest1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_nearest1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) +func AtgDivide1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_divide1(ptr, self, other ) } -func AtgUpsampleNearest1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_nearest1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscales) +func AtgDivide_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_divide_(ptr, self, other) } -func AtgUpsampleNearest1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scales float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cscales := *(*C.double)(unsafe.Pointer(&scales)) - C.atg_upsample_nearest1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscales) +func AtgDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_divide_1(ptr, self, other ) } -func AtgUpsampleNearest2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cscalesH := 
*(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) +func AtgDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_divide_out(ptr, out, self, other) } -func AtgUpsampleNearest2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) +func AtgDot(ptr *Ctensor, self Ctensor, tensor Ctensor){ +C.atg_dot(ptr, self, tensor) } -func AtgUpsampleNearest2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesH, cscalesW) +func AtgDotOut(ptr *Ctensor, out Ctensor, self Ctensor, tensor Ctensor){ +C.atg_dot_out(ptr, out, self, tensor) } -func AtgUpsampleNearest2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesH, cscalesW) +func AtgDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_dropout(ptr, input, cp, ctrain) } -func AtgUpsampleNearest3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_dropout_(ptr, self, cp, ctrain) } -func AtgUpsampleNearest3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD 
float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgDstack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_dstack(ptr, ctensorsDataPtr, ctensorsLen) } -func AtgUpsampleNearest3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesD float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgDstackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_dstack_out(ptr, out, ctensorsDataPtr, ctensorsLen) } -func AtgUpsampleNearest3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesD float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_nearest3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesD, cscalesH, cscalesW) +func AtgEig(ptr *Ctensor, self Ctensor, eigenvectors int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +C.atg_eig(ptr, self, ceigenvectors) } -func AtgUpsampleTrilinear3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_trilinear3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +func AtgEigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +C.atg_eig_out(ptr, e, v, self, ceigenvectors) } -func 
AtgUpsampleTrilinear3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_trilinear3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +func AtgEinsum(ptr *Ctensor, equation string, tensorsData []Ctensor, tensorsLen int){ +cequation := C.CString(equation) +equationLen := len(equation) +cequationLen := *(*C.int)(unsafe.Pointer(&equationLen)) +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_einsum(ptr, cequation, cequationLen, ctensorsDataPtr, ctensorsLen) } -func AtgUpsampleTrilinear3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) - cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_trilinear3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +func AtgElu(ptr *Ctensor, self Ctensor){ +C.atg_elu(ptr, self) } -func AtgUpsampleTrilinear3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesD float64, scalesH float64, scalesW float64) { - coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) - coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) - calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) - cscalesD := *(*C.double)(unsafe.Pointer(&scalesD)) - cscalesH := *(*C.double)(unsafe.Pointer(&scalesH)) - cscalesW := *(*C.double)(unsafe.Pointer(&scalesW)) - C.atg_upsample_trilinear3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesD, cscalesH, cscalesW) +func AtgElu_(ptr *Ctensor, self Ctensor){ +C.atg_elu_(ptr, self) } -func AtgValues(ptr *Ctensor, self Ctensor) { - C.atg_values(ptr, self) +func AtgEluBackward(ptr *Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, output Ctensor){ +C.atg_elu_backward(ptr, gradOutput, alpha , scale , inputScale , output) } -func AtgVar(ptr *Ctensor, self Ctensor, unbiased int32) { - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - C.atg_var(ptr, self, cunbiased) +func AtgEluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, alpha Cscalar, scale Cscalar, inputScale Cscalar, 
output Ctensor){ +C.atg_elu_backward_out(ptr, gradInput, gradOutput, alpha , scale , inputScale , output) } -func AtgVar1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_var1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +func AtgEluOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_elu_out(ptr, out, self) } -func AtgVarMean(ptr *Ctensor, self Ctensor, unbiased int32) { - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - C.atg_var_mean(ptr, self, cunbiased) +func AtgEmbedding(ptr *Ctensor, weight Ctensor, indices Ctensor, paddingIdx int64, scaleGradByFreq int32, sparse int32){ +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +C.atg_embedding(ptr, weight, indices, cpaddingIdx, cscaleGradByFreq, csparse) } -func AtgVarMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_var_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +func AtgEmbeddingBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32, sparse int32){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +C.atg_embedding_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq, csparse) } -func AtgVarOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32) { - cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) - cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) - cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) - ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) - C.atg_var_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +func AtgEmbeddingBag(ptr *Ctensor, weight Ctensor, indices Ctensor, offsets Ctensor, scaleGradByFreq int32, mode int64, sparse int32, perSampleWeights Ctensor, includeLastOffset int32){ +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +csparse := *(*C.int)(unsafe.Pointer(&sparse)) +cincludeLastOffset := *(*C.int)(unsafe.Pointer(&includeLastOffset)) +C.atg_embedding_bag(ptr, weight, indices, offsets, cscaleGradByFreq, cmode, csparse, perSampleWeights, cincludeLastOffset) } -func AtgView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int) { - csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) - csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) - C.atg_view(ptr, self, csizeDataPtr, csizeLen) +func AtgEmbeddingDenseBackward(ptr *Ctensor, gradOutput Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +C.atg_embedding_dense_backward(ptr, gradOutput, indices, cnumWeights, 
cpaddingIdx, cscaleGradByFreq) } -func AtgViewAs(ptr *Ctensor, self Ctensor, other Ctensor) { - C.atg_view_as(ptr, self, other) +func AtgEmbeddingRenorm_(ptr *Ctensor, self Ctensor, indices Ctensor, maxNorm float64, normType float64){ +cmaxNorm := *(*C.double)(unsafe.Pointer(&maxNorm)) +cnormType := *(*C.double)(unsafe.Pointer(&normType)) +C.atg_embedding_renorm_(ptr, self, indices, cmaxNorm, cnormType) +} +func AtgEmbeddingSparseBackward(ptr *Ctensor, grad Ctensor, indices Ctensor, numWeights int64, paddingIdx int64, scaleGradByFreq int32){ +cnumWeights := *(*C.int64_t)(unsafe.Pointer(&numWeights)) +cpaddingIdx := *(*C.int64_t)(unsafe.Pointer(&paddingIdx)) +cscaleGradByFreq := *(*C.int)(unsafe.Pointer(&scaleGradByFreq)) +C.atg_embedding_sparse_backward(ptr, grad, indices, cnumWeights, cpaddingIdx, cscaleGradByFreq) +} +func AtgEmpty(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_empty(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgEmptyLike(ptr *Ctensor, self Ctensor){ +C.atg_empty_like(ptr, self) +} +func AtgEmptyMeta(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_empty_meta(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgEmptyOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_empty_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgEmptyQuantized(ptr *Ctensor, sizeData []int64, sizeLen int, qtensor Ctensor){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_empty_quantized(ptr, csizeDataPtr, csizeLen, qtensor) +} +func AtgEmptyStrided(ptr *Ctensor, sizeData []int64, sizeLen int, strideData []int64, strideLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_empty_strided(ptr, csizeDataPtr, csizeLen, cstrideDataPtr, cstrideLen, coptionsKind, coptionsDevice) +} +func AtgEq(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_eq(ptr, self, other ) +} +func AtgEq1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_eq1(ptr, self, other) +} +func AtgEq_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_eq_(ptr, self, other ) +} +func AtgEq1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_eq_1(ptr, self, other) +} +func AtgEqOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_eq_out(ptr, out, self, other ) +} +func AtgEqOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_eq_out1(ptr, out, self, other) +} +func AtgErf(ptr *Ctensor, self Ctensor){ +C.atg_erf(ptr, self) +} +func AtgErf_(ptr *Ctensor, self Ctensor){ +C.atg_erf_(ptr, self) +} +func 
AtgErfOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_erf_out(ptr, out, self) +} +func AtgErfc(ptr *Ctensor, self Ctensor){ +C.atg_erfc(ptr, self) +} +func AtgErfc_(ptr *Ctensor, self Ctensor){ +C.atg_erfc_(ptr, self) +} +func AtgErfcOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_erfc_out(ptr, out, self) +} +func AtgErfinv(ptr *Ctensor, self Ctensor){ +C.atg_erfinv(ptr, self) +} +func AtgErfinv_(ptr *Ctensor, self Ctensor){ +C.atg_erfinv_(ptr, self) +} +func AtgErfinvOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_erfinv_out(ptr, out, self) +} +func AtgExp(ptr *Ctensor, self Ctensor){ +C.atg_exp(ptr, self) +} +func AtgExp2(ptr *Ctensor, self Ctensor){ +C.atg_exp2(ptr, self) +} +func AtgExp2_(ptr *Ctensor, self Ctensor){ +C.atg_exp2_(ptr, self) +} +func AtgExp2Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_exp2_out(ptr, out, self) +} +func AtgExp_(ptr *Ctensor, self Ctensor){ +C.atg_exp_(ptr, self) +} +func AtgExpOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_exp_out(ptr, out, self) +} +func AtgExpand(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, implicit int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +cimplicit := *(*C.int)(unsafe.Pointer(&implicit)) +C.atg_expand(ptr, self, csizeDataPtr, csizeLen, cimplicit) +} +func AtgExpandAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_expand_as(ptr, self, other) +} +func AtgExpm1(ptr *Ctensor, self Ctensor){ +C.atg_expm1(ptr, self) +} +func AtgExpm1_(ptr *Ctensor, self Ctensor){ +C.atg_expm1_(ptr, self) +} +func AtgExpm1Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_expm1_out(ptr, out, self) +} +func AtgExponential_(ptr *Ctensor, self Ctensor, lambd float64){ +clambd := *(*C.double)(unsafe.Pointer(&lambd)) +C.atg_exponential_(ptr, self, clambd) +} +func AtgEye(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_eye(ptr, cn, coptionsKind, coptionsDevice) +} +func AtgEye1(ptr *Ctensor, n int64, m int64, optionsKind int32, optionsDevice int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cm := *(*C.int64_t)(unsafe.Pointer(&m)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_eye1(ptr, cn, cm, coptionsKind, coptionsDevice) +} +func AtgEyeOut(ptr *Ctensor, out Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_eye_out(ptr, out, cn) +} +func AtgEyeOut1(ptr *Ctensor, out Ctensor, n int64, m int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cm := *(*C.int64_t)(unsafe.Pointer(&m)) +C.atg_eye_out1(ptr, out, cn, cm) +} +func AtgFakeQuantizePerChannelAffine(ptr *Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg_fake_quantize_per_channel_affine(ptr, self, scale, zeroPoint, caxis, cquantMin, cquantMax) +} +func AtgFakeQuantizePerChannelAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale Ctensor, zeroPoint Ctensor, axis int64, quantMin int64, quantMax int64){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) 
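// Editorial note (not part of the generated diff): every wrapper in this file returns its result
// through the leading ptr *Ctensor out-parameter rather than a Go return value; the underlying
// atg_* C function writes the new tensor handle(s) there. For ops that yield several tensors
// (AtgCummax, AtgEig, AtgGeqrf above), ptr is presumably expected to point at enough contiguous
// space for one Ctensor per output, with the caller reading the handles back in order.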
+C.atg_fake_quantize_per_channel_affine_backward(ptr, grad, self, scale, zeroPoint, caxis, cquantMin, cquantMax) +} +func AtgFakeQuantizePerTensorAffine(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg_fake_quantize_per_tensor_affine(ptr, self, cscale, czeroPoint, cquantMin, cquantMax) +} +func AtgFakeQuantizePerTensorAffineBackward(ptr *Ctensor, grad Ctensor, self Ctensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +cquantMin := *(*C.int64_t)(unsafe.Pointer(&quantMin)) +cquantMax := *(*C.int64_t)(unsafe.Pointer(&quantMax)) +C.atg_fake_quantize_per_tensor_affine_backward(ptr, grad, self, cscale, czeroPoint, cquantMin, cquantMax) +} +func AtgFbgemmLinearFp16Weight(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){ +C.atg_fbgemm_linear_fp16_weight(ptr, input, packedWeight, bias) +} +func AtgFbgemmLinearFp16WeightFp32Activation(ptr *Ctensor, input Ctensor, packedWeight Ctensor, bias Ctensor){ +C.atg_fbgemm_linear_fp16_weight_fp32_activation(ptr, input, packedWeight, bias) +} +func AtgFbgemmLinearInt8Weight(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){ +C.atg_fbgemm_linear_int8_weight(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias) +} +func AtgFbgemmLinearInt8WeightFp32Activation(ptr *Ctensor, input Ctensor, weight Ctensor, packed Ctensor, colOffsets Ctensor, weightScale Cscalar, weightZeroPoint Cscalar, bias Ctensor){ +C.atg_fbgemm_linear_int8_weight_fp32_activation(ptr, input, weight, packed, colOffsets, weightScale , weightZeroPoint , bias) +} +func AtgFbgemmPackGemmMatrixFp16(ptr *Ctensor, input Ctensor){ +C.atg_fbgemm_pack_gemm_matrix_fp16(ptr, input) +} +func AtgFbgemmPackQuantizedMatrix(ptr *Ctensor, input Ctensor){ +C.atg_fbgemm_pack_quantized_matrix(ptr, input) +} +func AtgFbgemmPackQuantizedMatrix1(ptr *Ctensor, input Ctensor, k int64, n int64){ +ck := *(*C.int64_t)(unsafe.Pointer(&k)) +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_fbgemm_pack_quantized_matrix1(ptr, input, ck, cn) +} +func AtgFeatureAlphaDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_alpha_dropout(ptr, input, cp, ctrain) +} +func AtgFeatureAlphaDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_alpha_dropout_(ptr, self, cp, ctrain) +} +func AtgFeatureDropout(ptr *Ctensor, input Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_dropout(ptr, input, cp, ctrain) +} +func AtgFeatureDropout_(ptr *Ctensor, self Ctensor, p float64, train int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +C.atg_feature_dropout_(ptr, self, cp, ctrain) +} +func AtgFft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +C.atg_fft(ptr, self, 
csignalNdim, cnormalized) +} +func AtgFftFft(ptr *Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ +cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) +cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_fft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) +} +func AtgFftFftn(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_fftn(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFftHfft(ptr *Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ +cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) +cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_hfft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) +} +func AtgFftIfft(ptr *Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ +cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) +cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_ifft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) +} +func AtgFftIfftn(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_ifftn(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFftIhfft(ptr *Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ +cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) +cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_ihfft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) +} +func AtgFftIrfft(ptr *Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ +cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) +cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_irfft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) +} +func AtgFftIrfftn(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_irfftn(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFftRfft(ptr 
*Ctensor, self Ctensor, nVal int64, nNull int, dim int64, norm string){ +cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) +cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_rfft(ptr, self, cnVal, cnNull, cdim, cnorm, cnormLen) +} +func AtgFftRfftn(ptr *Ctensor, self Ctensor, sData []int64, sLen int, dimData []int64, dimLen int, norm string){ +csDataPtr := (*C.int64_t)(unsafe.Pointer(&sData[0])) +csLen := *(*C.int)(unsafe.Pointer(&sLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cnorm := C.CString(norm) +normLen := len(norm) +cnormLen := *(*C.int)(unsafe.Pointer(&normLen)) +C.atg_fft_rfftn(ptr, self, csDataPtr, csLen, cdimDataPtr, cdimLen, cnorm, cnormLen) +} +func AtgFill_(ptr *Ctensor, self Ctensor, value Cscalar){ +C.atg_fill_(ptr, self, value ) +} +func AtgFill1_(ptr *Ctensor, self Ctensor, value Ctensor){ +C.atg_fill_1(ptr, self, value) +} +func AtgFillDiagonal_(ptr *Ctensor, self Ctensor, fillValue Cscalar, wrap int32){ +cwrap := *(*C.int)(unsafe.Pointer(&wrap)) +C.atg_fill_diagonal_(ptr, self, fillValue , cwrap) +} +func AtgFix(ptr *Ctensor, self Ctensor){ +C.atg_fix(ptr, self) +} +func AtgFix_(ptr *Ctensor, self Ctensor){ +C.atg_fix_(ptr, self) +} +func AtgFixOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_fix_out(ptr, out, self) +} +func AtgFlatten(ptr *Ctensor, self Ctensor, startDim int64, endDim int64){ +cstartDim := *(*C.int64_t)(unsafe.Pointer(&startDim)) +cendDim := *(*C.int64_t)(unsafe.Pointer(&endDim)) +C.atg_flatten(ptr, self, cstartDim, cendDim) +} +func AtgFlip(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){ +cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) +cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) +C.atg_flip(ptr, self, cdimsDataPtr, cdimsLen) +} +func AtgFliplr(ptr *Ctensor, self Ctensor){ +C.atg_fliplr(ptr, self) +} +func AtgFlipud(ptr *Ctensor, self Ctensor){ +C.atg_flipud(ptr, self) +} +func AtgFloor(ptr *Ctensor, self Ctensor){ +C.atg_floor(ptr, self) +} +func AtgFloor_(ptr *Ctensor, self Ctensor){ +C.atg_floor_(ptr, self) +} +func AtgFloorDivide(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_floor_divide(ptr, self, other) +} +func AtgFloorDivide1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_floor_divide1(ptr, self, other ) +} +func AtgFloorDivide_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_floor_divide_(ptr, self, other) +} +func AtgFloorDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_floor_divide_1(ptr, self, other ) +} +func AtgFloorDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_floor_divide_out(ptr, out, self, other) +} +func AtgFloorOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_floor_out(ptr, out, self) +} +func AtgFmod(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_fmod(ptr, self, other ) +} +func AtgFmod1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_fmod1(ptr, self, other) +} +func AtgFmod_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_fmod_(ptr, self, other ) +} +func AtgFmod1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_fmod_1(ptr, self, other) +} +func AtgFmodOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_fmod_out(ptr, out, self, other ) +} +func AtgFmodOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_fmod_out1(ptr, out, self, other) +} +func AtgFrac(ptr *Ctensor, self Ctensor){ 
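// Editorial note (not part of the generated diff): the fft wrappers just above illustrate two
// further conventions in the regenerated API. Optional scalars arrive as a value/flag pair
// (nVal int64, nNull int), with nNull reinterpreted as a C.uint8_t null marker on the C side.
// Strings such as norm are converted with C.CString and passed alongside their byte length;
// C.CString copies the Go string into C-allocated memory, and no corresponding C.free appears
// in these wrappers.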
+C.atg_frac(ptr, self) +} +func AtgFrac_(ptr *Ctensor, self Ctensor){ +C.atg_frac_(ptr, self) +} +func AtgFracOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_frac_out(ptr, out, self) +} +func AtgFractionalMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool2dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool2d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFractionalMaxPool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, indices) +} +func AtgFractionalMaxPool3dOut(ptr *Ctensor, output Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, outputSizeData []int64, outputSizeLen int, randomSamples Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_fractional_max_pool3d_out(ptr, output, indices, self, ckernelSizeDataPtr, ckernelSizeLen, coutputSizeDataPtr, coutputSizeLen, randomSamples) +} +func AtgFrobeniusNorm(ptr *Ctensor, self Ctensor){ +C.atg_frobenius_norm(ptr, self) +} +func AtgFrobeniusNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_frobenius_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgFrobeniusNormOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_frobenius_norm_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgFromFile(ptr *Ctensor, filename string, shared int32, sizeVal int64, sizeNull int, optionsKind int32, optionsDevice int32){ +cfilename := C.CString(filename) +filenameLen := len(filename) +cfilenameLen := *(*C.int)(unsafe.Pointer(&filenameLen)) +cshared := *(*C.int)(unsafe.Pointer(&shared)) +csizeVal := *(*C.int64_t)(unsafe.Pointer(&sizeVal)) +csizeNull := *(*C.uint8_t)(unsafe.Pointer(&sizeNull)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_from_file(ptr, cfilename, cfilenameLen, cshared, csizeVal, csizeNull, coptionsKind, coptionsDevice) +} +func AtgFull(ptr *Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_full(ptr, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice) +} +func AtgFullLike(ptr *Ctensor, self Ctensor, fillValue Cscalar){ +C.atg_full_like(ptr, self, fillValue ) +} +func AtgFullOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar){ +csizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_full_out(ptr, out, csizeDataPtr, csizeLen, fillValue ) +} +func AtgGather(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) +C.atg_gather(ptr, self, cdim, index, csparseGrad) +} +func AtgGatherBackward(ptr *Ctensor, grad Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) +C.atg_gather_backward(ptr, grad, self, cdim, index, csparseGrad) +} +func AtgGatherOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor, sparseGrad int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csparseGrad := *(*C.int)(unsafe.Pointer(&sparseGrad)) +C.atg_gather_out(ptr, out, self, cdim, index, csparseGrad) +} +func AtgGcd(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_gcd(ptr, self, other) +} +func AtgGcd_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_gcd_(ptr, self, other) +} +func AtgGcdOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_gcd_out(ptr, out, self, other) +} +func AtgGe(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ge(ptr, self, other ) +} +func AtgGe1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ge1(ptr, self, other) +} +func AtgGe_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ge_(ptr, self, other ) +} +func AtgGe1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ge_1(ptr, self, other) +} +func AtgGeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_ge_out(ptr, out, self, other ) +} +func AtgGeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_ge_out1(ptr, out, self, other) +} +func AtgGelu(ptr *Ctensor, self Ctensor){ +C.atg_gelu(ptr, self) +} +func AtgGeluBackward(ptr *Ctensor, grad Ctensor, self Ctensor){ +C.atg_gelu_backward(ptr, grad, self) +} +func AtgGeometric_(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg_geometric_(ptr, self, cp) +} +func AtgGeqrf(ptr *Ctensor, self Ctensor){ +C.atg_geqrf(ptr, self) +} +func AtgGeqrfOut(ptr *Ctensor, a Ctensor, tau Ctensor, self Ctensor){ +C.atg_geqrf_out(ptr, a, tau, self) +} +func AtgGer(ptr *Ctensor, self Ctensor, vec2 Ctensor){ +C.atg_ger(ptr, self, vec2) +} +func AtgGerOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor){ +C.atg_ger_out(ptr, out, self, vec2) +} +func AtgGlu(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu(ptr, self, cdim) +} +func AtgGluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu_backward(ptr, gradOutput, self, cdim) +} +func AtgGluBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu_backward_out(ptr, gradInput, gradOutput, self, cdim) +} +func AtgGluOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_glu_out(ptr, out, self, cdim) +} +func AtgGrad(ptr *Ctensor, self Ctensor){ +C.atg_grad(ptr, self) +} +func AtgGreater(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_greater(ptr, self, other ) +} +func AtgGreater1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_greater1(ptr, self, other) +} +func AtgGreater_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_greater_(ptr, self, other 
) +} +func AtgGreater1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_greater_1(ptr, self, other) +} +func AtgGreaterEqual(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_greater_equal(ptr, self, other ) +} +func AtgGreaterEqual1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_greater_equal1(ptr, self, other) +} +func AtgGreaterEqual_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_greater_equal_(ptr, self, other ) +} +func AtgGreaterEqual1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_greater_equal_1(ptr, self, other) +} +func AtgGreaterEqualOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_greater_equal_out(ptr, out, self, other ) +} +func AtgGreaterEqualOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_greater_equal_out1(ptr, out, self, other) +} +func AtgGreaterOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_greater_out(ptr, out, self, other ) +} +func AtgGreaterOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_greater_out1(ptr, out, self, other) +} +func AtgGridSampler(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler2d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_2d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler2dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_2d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler3d(ptr *Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_3d(ptr, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGridSampler3dBackward(ptr *Ctensor, gradOutput Ctensor, input Ctensor, grid Ctensor, interpolationMode int64, paddingMode int64, alignCorners int32){ +cinterpolationMode := *(*C.int64_t)(unsafe.Pointer(&interpolationMode)) +cpaddingMode := *(*C.int64_t)(unsafe.Pointer(&paddingMode)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +C.atg_grid_sampler_3d_backward(ptr, gradOutput, input, grid, cinterpolationMode, cpaddingMode, calignCorners) +} +func AtgGroupNorm(ptr *Ctensor, input Ctensor, numGroups int64, weight Ctensor, bias Ctensor, eps float64, cudnnEnabled int32){ +cnumGroups := *(*C.int64_t)(unsafe.Pointer(&numGroups)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) +C.atg_group_norm(ptr, 
input, cnumGroups, weight, bias, ceps, ccudnnEnabled)
+}
+func AtgGru(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+C.atg_gru(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
+}
+func AtgGru1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+C.atg_gru1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
+}
+func AtgGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
+C.atg_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
+}
+func AtgGt(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg_gt(ptr, self, other )
+}
+func AtgGt1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg_gt1(ptr, self, other)
+}
+func AtgGt_(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg_gt_(ptr, self, other )
+}
+func AtgGt1_(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg_gt_1(ptr, self, other)
+}
+func AtgGtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){
+C.atg_gt_out(ptr, out, self, other )
+}
+func AtgGtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
+C.atg_gt_out1(ptr, out, self, other)
+}
+func AtgHammingWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){
+cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_hamming_window(ptr, cwindowLength, coptionsKind, coptionsDevice)
+}
+func AtgHammingWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){
+cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
+cperiodic := *(*C.int)(unsafe.Pointer(&periodic))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_hamming_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice)
+}
+func AtgHammingWindow2(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, optionsKind int32, optionsDevice int32){
+cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
+cperiodic := *(*C.int)(unsafe.Pointer(&periodic))
+calpha := *(*C.double)(unsafe.Pointer(&alpha))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_hamming_window2(ptr, cwindowLength, cperiodic, calpha, coptionsKind, coptionsDevice)
+}
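Aside (editorial note, not part of the generated diff): every `AtgXxx` wrapper in this file follows the same calling convention. The caller passes a pre-allocated `*Ctensor` as the first argument, plain Go arguments are reinterpreted into their C counterparts through `unsafe.Pointer`, and the underlying `atg_*` C function writes the resulting tensor handle(s) through that pointer. The sketch below shows, under stated assumptions, how one of these wrappers might be driven by hand; the numeric kind/device codes and the omission of error checking are assumptions for illustration, since in normal use the higher-level tensor API in this repository allocates the handle and checks libtorch's error state.

```go
// Hypothetical sketch only. It assumes it lives inside the libtch package,
// where Ctensor, the cgo "C" import, and the Atg* bindings already exist.
func exampleHammingWindow() Ctensor {
	// Reserve space for a single returned C tensor handle.
	ptr := (*Ctensor)(C.malloc(C.size_t(unsafe.Sizeof(uintptr(0)))))
	defer C.free(unsafe.Pointer(ptr))

	// 256-point Hamming window; the int32 codes are assumed to follow the
	// usual libtorch encoding (kind 6 = float32, device -1 = CPU).
	AtgHammingWindow(ptr, 256, 6, -1)

	return *ptr // the caller now owns the handle and must free it later
}
```

Multi-result wrappers (for example the kthvalue/max variants further down) appear to follow the same idea, with the C side writing several consecutive handles starting at the given pointer, so the caller reserves space for each of them.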
+func AtgHammingWindow3(ptr *Ctensor, windowLength int64, periodic int32, alpha float64, beta float64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +calpha := *(*C.double)(unsafe.Pointer(&alpha)) +cbeta := *(*C.double)(unsafe.Pointer(&beta)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hamming_window3(ptr, cwindowLength, cperiodic, calpha, cbeta, coptionsKind, coptionsDevice) +} +func AtgHannWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hann_window(ptr, cwindowLength, coptionsKind, coptionsDevice) +} +func AtgHannWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){ +cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength)) +cperiodic := *(*C.int)(unsafe.Pointer(&periodic)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_hann_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice) +} +func AtgHardshrink(ptr *Ctensor, self Ctensor){ +C.atg_hardshrink(ptr, self) +} +func AtgHardshrinkBackward(ptr *Ctensor, gradOut Ctensor, self Ctensor, lambd Cscalar){ +C.atg_hardshrink_backward(ptr, gradOut, self, lambd ) +} +func AtgHardsigmoid(ptr *Ctensor, self Ctensor){ +C.atg_hardsigmoid(ptr, self) +} +func AtgHardsigmoid_(ptr *Ctensor, self Ctensor){ +C.atg_hardsigmoid_(ptr, self) +} +func AtgHardsigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_hardsigmoid_backward(ptr, gradOutput, self) +} +func AtgHardsigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_hardsigmoid_out(ptr, out, self) +} +func AtgHardswish(ptr *Ctensor, self Ctensor){ +C.atg_hardswish(ptr, self) +} +func AtgHardswish_(ptr *Ctensor, self Ctensor){ +C.atg_hardswish_(ptr, self) +} +func AtgHardswishBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_hardswish_backward(ptr, gradOutput, self) +} +func AtgHardswishOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_hardswish_out(ptr, out, self) +} +func AtgHardtanh(ptr *Ctensor, self Ctensor){ +C.atg_hardtanh(ptr, self) +} +func AtgHardtanh_(ptr *Ctensor, self Ctensor){ +C.atg_hardtanh_(ptr, self) +} +func AtgHardtanhBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){ +C.atg_hardtanh_backward(ptr, gradOutput, self, minVal , maxVal ) +} +func AtgHardtanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, minVal Cscalar, maxVal Cscalar){ +C.atg_hardtanh_backward_out(ptr, gradInput, gradOutput, self, minVal , maxVal ) +} +func AtgHardtanhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_hardtanh_out(ptr, out, self) +} +func AtgHeaviside(ptr *Ctensor, self Ctensor, values Ctensor){ +C.atg_heaviside(ptr, self, values) +} +func AtgHeaviside_(ptr *Ctensor, self Ctensor, values Ctensor){ +C.atg_heaviside_(ptr, self, values) +} +func AtgHeavisideOut(ptr *Ctensor, out Ctensor, self Ctensor, values Ctensor){ +C.atg_heaviside_out(ptr, out, self, values) +} +func AtgHingeEmbeddingLoss(ptr *Ctensor, self Ctensor, target Ctensor, margin float64, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +creduction := 
*(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_hinge_embedding_loss(ptr, self, target, cmargin, creduction) +} +func AtgHistc(ptr *Ctensor, self Ctensor, bins int64){ +cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) +C.atg_histc(ptr, self, cbins) +} +func AtgHistcOut(ptr *Ctensor, out Ctensor, self Ctensor, bins int64){ +cbins := *(*C.int64_t)(unsafe.Pointer(&bins)) +C.atg_histc_out(ptr, out, self, cbins) +} +func AtgHspmm(ptr *Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_hspmm(ptr, mat1, mat2) +} +func AtgHspmmOut(ptr *Ctensor, out Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_hspmm_out(ptr, out, mat1, mat2) +} +func AtgHstack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_hstack(ptr, ctensorsDataPtr, ctensorsLen) +} +func AtgHstackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_hstack_out(ptr, out, ctensorsDataPtr, ctensorsLen) +} +func AtgHypot(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_hypot(ptr, self, other) +} +func AtgHypot_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_hypot_(ptr, self, other) +} +func AtgHypotOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_hypot_out(ptr, out, self, other) +} +func AtgI0(ptr *Ctensor, self Ctensor){ +C.atg_i0(ptr, self) +} +func AtgI0_(ptr *Ctensor, self Ctensor){ +C.atg_i0_(ptr, self) +} +func AtgI0Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_i0_out(ptr, out, self) +} +func AtgIfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +C.atg_ifft(ptr, self, csignalNdim, cnormalized) +} +func AtgIm2col(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colBackward(ptr *Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col_backward(ptr, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, inputSizeData []int64, inputSizeLen int, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col_backward_out(ptr, gradInput, gradOutput, cinputSizeDataPtr, cinputSizeLen, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgIm2colOut(ptr *Ctensor, out Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, dilationData []int64, dilationLen int, paddingData []int64, paddingLen int, strideData []int64, strideLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +C.atg_im2col_out(ptr, out, self, ckernelSizeDataPtr, ckernelSizeLen, cdilationDataPtr, cdilationLen, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen) +} +func AtgImag(ptr *Ctensor, self Ctensor){ +C.atg_imag(ptr, self) +} +func AtgIndex(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +C.atg_index(ptr, self, cindicesDataPtr, cindicesLen) +} +func AtgIndexAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_add(ptr, self, cdim, index, source) +} +func AtgIndexAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_add_(ptr, self, cdim, index, source) +} +func AtgIndexCopy(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_copy(ptr, self, cdim, index, source) +} +func AtgIndexCopy_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, source Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_copy_(ptr, self, cdim, index, source) +} +func AtgIndexFill(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill(ptr, self, cdim, index, 
value ) +} +func AtgIndexFill1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill1(ptr, self, cdim, index, value) +} +func AtgIndexFill_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill_(ptr, self, cdim, index, value ) +} +func AtgIndexFill1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_fill_1(ptr, self, cdim, index, value) +} +func AtgIndexPut(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) +C.atg_index_put(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) +} +func AtgIndexPut_(ptr *Ctensor, self Ctensor, indicesData []Ctensor, indicesLen int, values Ctensor, accumulate int32){ +cindicesDataPtr := (*Ctensor)(unsafe.Pointer(&indicesData[0])) +cindicesLen := *(*C.int)(unsafe.Pointer(&indicesLen)) +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) +C.atg_index_put_(ptr, self, cindicesDataPtr, cindicesLen, values, caccumulate) +} +func AtgIndexSelect(ptr *Ctensor, self Ctensor, dim int64, index Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_select(ptr, self, cdim, index) +} +func AtgIndexSelectBackward(ptr *Ctensor, grad Ctensor, selfSizesData []int64, selfSizesLen int, dim int64, index Ctensor){ +cselfSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizesData[0])) +cselfSizesLen := *(*C.int)(unsafe.Pointer(&selfSizesLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_select_backward(ptr, grad, cselfSizesDataPtr, cselfSizesLen, cdim, index) +} +func AtgIndexSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, index Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_index_select_out(ptr, out, self, cdim, index) +} +func AtgIndices(ptr *Ctensor, self Ctensor){ +C.atg_indices(ptr, self) +} +func AtgInfinitelyDifferentiableGeluBackward(ptr *Ctensor, grad Ctensor, self Ctensor){ +C.atg_infinitely_differentiable_gelu_backward(ptr, grad, self) +} +func AtgInstanceNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, useInputStats int32, momentum float64, eps float64, cudnnEnabled int32){ +cuseInputStats := *(*C.int)(unsafe.Pointer(&useInputStats)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnabled := *(*C.int)(unsafe.Pointer(&cudnnEnabled)) +C.atg_instance_norm(ptr, input, weight, bias, runningMean, runningVar, cuseInputStats, cmomentum, ceps, ccudnnEnabled) +} +func AtgIntRepr(ptr *Ctensor, self Ctensor){ +C.atg_int_repr(ptr, self) +} +func AtgInverse(ptr *Ctensor, self Ctensor){ +C.atg_inverse(ptr, self) +} +func AtgInverseOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_inverse_out(ptr, out, self) +} +func AtgIrfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32, signalSizesData []int64, signalSizesLen int){ +csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +csignalSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&signalSizesData[0])) +csignalSizesLen := *(*C.int)(unsafe.Pointer(&signalSizesLen)) 
+C.atg_irfft(ptr, self, csignalNdim, cnormalized, conesided, csignalSizesDataPtr, csignalSizesLen)
+}
+func AtgIsclose(ptr *Ctensor, self Ctensor, other Ctensor, rtol float64, atol float64, equalNan int32){
+crtol := *(*C.double)(unsafe.Pointer(&rtol))
+catol := *(*C.double)(unsafe.Pointer(&atol))
+cequalNan := *(*C.int)(unsafe.Pointer(&equalNan))
+C.atg_isclose(ptr, self, other, crtol, catol, cequalNan)
+}
+func AtgIsfinite(ptr *Ctensor, self Ctensor){
+C.atg_isfinite(ptr, self)
+}
+func AtgIsinf(ptr *Ctensor, self Ctensor){
+C.atg_isinf(ptr, self)
+}
+func AtgIsnan(ptr *Ctensor, self Ctensor){
+C.atg_isnan(ptr, self)
+}
+func AtgIsneginf(ptr *Ctensor, self Ctensor){
+C.atg_isneginf(ptr, self)
+}
+func AtgIsneginfOut(ptr *Ctensor, out Ctensor, self Ctensor){
+C.atg_isneginf_out(ptr, out, self)
+}
+func AtgIsposinf(ptr *Ctensor, self Ctensor){
+C.atg_isposinf(ptr, self)
+}
+func AtgIsposinfOut(ptr *Ctensor, out Ctensor, self Ctensor){
+C.atg_isposinf_out(ptr, out, self)
+}
+func AtgIsreal(ptr *Ctensor, self Ctensor){
+C.atg_isreal(ptr, self)
+}
+func AtgIstft(ptr *Ctensor, self Ctensor, nFft int64, hopLengthVal int64, hopLengthNull int, winLengthVal int64, winLengthNull int, window Ctensor, center int32, normalized int32, onesided int32, lengthVal int64, lengthNull int, returnComplex int32){
+cnFft := *(*C.int64_t)(unsafe.Pointer(&nFft))
+chopLengthVal := *(*C.int64_t)(unsafe.Pointer(&hopLengthVal))
+chopLengthNull := *(*C.uint8_t)(unsafe.Pointer(&hopLengthNull))
+cwinLengthVal := *(*C.int64_t)(unsafe.Pointer(&winLengthVal))
+cwinLengthNull := *(*C.uint8_t)(unsafe.Pointer(&winLengthNull))
+ccenter := *(*C.int)(unsafe.Pointer(&center))
+cnormalized := *(*C.int)(unsafe.Pointer(&normalized))
+conesided := *(*C.int)(unsafe.Pointer(&onesided))
+clengthVal := *(*C.int64_t)(unsafe.Pointer(&lengthVal))
+clengthNull := *(*C.uint8_t)(unsafe.Pointer(&lengthNull))
+creturnComplex := *(*C.int)(unsafe.Pointer(&returnComplex))
+C.atg_istft(ptr, self, cnFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window, ccenter, cnormalized, conesided, clengthVal, clengthNull, creturnComplex)
+}
+func AtgKaiserWindow(ptr *Ctensor, windowLength int64, optionsKind int32, optionsDevice int32){
+cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_kaiser_window(ptr, cwindowLength, coptionsKind, coptionsDevice)
+}
+func AtgKaiserWindow1(ptr *Ctensor, windowLength int64, periodic int32, optionsKind int32, optionsDevice int32){
+cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
+cperiodic := *(*C.int)(unsafe.Pointer(&periodic))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_kaiser_window1(ptr, cwindowLength, cperiodic, coptionsKind, coptionsDevice)
+}
+func AtgKaiserWindow2(ptr *Ctensor, windowLength int64, periodic int32, beta float64, optionsKind int32, optionsDevice int32){
+cwindowLength := *(*C.int64_t)(unsafe.Pointer(&windowLength))
+cperiodic := *(*C.int)(unsafe.Pointer(&periodic))
+cbeta := *(*C.double)(unsafe.Pointer(&beta))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_kaiser_window2(ptr, cwindowLength, cperiodic, cbeta, coptionsKind, coptionsDevice)
+}
+func AtgKlDiv(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64, logTarget int32){
+creduction := *(*C.int64_t)(unsafe.Pointer(&reduction))
+clogTarget := *(*C.int)(unsafe.Pointer(&logTarget)) +C.atg_kl_div(ptr, self, target, creduction, clogTarget) +} +func AtgKlDivBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, logTarget int32){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +clogTarget := *(*C.int)(unsafe.Pointer(&logTarget)) +C.atg_kl_div_backward(ptr, gradOutput, self, target, creduction, clogTarget) +} +func AtgKthvalue(ptr *Ctensor, self Ctensor, k int64, dim int64, keepdim int32){ +ck := *(*C.int64_t)(unsafe.Pointer(&k)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_kthvalue(ptr, self, ck, cdim, ckeepdim) +} +func AtgKthvalueOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, keepdim int32){ +ck := *(*C.int64_t)(unsafe.Pointer(&k)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_kthvalue_out(ptr, values, indices, self, ck, cdim, ckeepdim) +} +func AtgL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss(ptr, self, target, creduction) +} +func AtgL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_l1_loss_out(ptr, out, self, target, creduction) +} +func AtgLayerNorm(ptr *Ctensor, input Ctensor, normalizedShapeData []int64, normalizedShapeLen int, weight Ctensor, bias Ctensor, eps float64, cudnnEnable int32){ +cnormalizedShapeDataPtr := (*C.int64_t)(unsafe.Pointer(&normalizedShapeData[0])) +cnormalizedShapeLen := *(*C.int)(unsafe.Pointer(&normalizedShapeLen)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ccudnnEnable := *(*C.int)(unsafe.Pointer(&cudnnEnable)) +C.atg_layer_norm(ptr, input, cnormalizedShapeDataPtr, cnormalizedShapeLen, weight, bias, ceps, ccudnnEnable) +} +func AtgLcm(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_lcm(ptr, self, other) +} +func AtgLcm_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_lcm_(ptr, self, other) +} +func AtgLcmOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_lcm_out(ptr, out, self, other) +} +func AtgLe(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_le(ptr, self, other ) +} +func AtgLe1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_le1(ptr, self, other) +} +func AtgLe_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_le_(ptr, self, other ) +} +func AtgLe1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_le_1(ptr, self, other) +} +func AtgLeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_le_out(ptr, out, self, other ) +} +func AtgLeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_le_out1(ptr, out, self, other) +} +func AtgLeakyRelu(ptr *Ctensor, self Ctensor){ +C.atg_leaky_relu(ptr, self) +} +func AtgLeakyRelu_(ptr *Ctensor, self Ctensor){ +C.atg_leaky_relu_(ptr, self) +} +func AtgLeakyReluBackward(ptr *Ctensor, gradOutput Ctensor, self 
Ctensor, negativeSlope Cscalar, selfIsResult int32){ +cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult)) +C.atg_leaky_relu_backward(ptr, gradOutput, self, negativeSlope , cselfIsResult) +} +func AtgLeakyReluOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_leaky_relu_out(ptr, out, self) +} +func AtgLerp(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){ +C.atg_lerp(ptr, self, end, weight ) +} +func AtgLerp1(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){ +C.atg_lerp1(ptr, self, end, weight) +} +func AtgLerp_(ptr *Ctensor, self Ctensor, end Ctensor, weight Cscalar){ +C.atg_lerp_(ptr, self, end, weight ) +} +func AtgLerp1_(ptr *Ctensor, self Ctensor, end Ctensor, weight Ctensor){ +C.atg_lerp_1(ptr, self, end, weight) +} +func AtgLerpOut(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Cscalar){ +C.atg_lerp_out(ptr, out, self, end, weight ) +} +func AtgLerpOut1(ptr *Ctensor, out Ctensor, self Ctensor, end Ctensor, weight Ctensor){ +C.atg_lerp_out1(ptr, out, self, end, weight) +} +func AtgLess(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_less(ptr, self, other ) +} +func AtgLess1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_less1(ptr, self, other) +} +func AtgLess_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_less_(ptr, self, other ) +} +func AtgLess1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_less_1(ptr, self, other) +} +func AtgLessEqual(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_less_equal(ptr, self, other ) +} +func AtgLessEqual1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_less_equal1(ptr, self, other) +} +func AtgLessEqual_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_less_equal_(ptr, self, other ) +} +func AtgLessEqual1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_less_equal_1(ptr, self, other) +} +func AtgLessEqualOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_less_equal_out(ptr, out, self, other ) +} +func AtgLessEqualOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_less_equal_out1(ptr, out, self, other) +} +func AtgLessOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_less_out(ptr, out, self, other ) +} +func AtgLessOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_less_out1(ptr, out, self, other) +} +func AtgLgamma(ptr *Ctensor, self Ctensor){ +C.atg_lgamma(ptr, self) +} +func AtgLgamma_(ptr *Ctensor, self Ctensor){ +C.atg_lgamma_(ptr, self) +} +func AtgLgammaOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_lgamma_out(ptr, out, self) +} +func AtgLinalgDet(ptr *Ctensor, self Ctensor){ +C.atg_linalg_det(ptr, self) +} +func AtgLinalgNorm(ptr *Ctensor, self Ctensor, ord Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_linalg_norm(ptr, self, ord , cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgLinalgNorm1(ptr *Ctensor, self Ctensor, ord string, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cord := C.CString(ord) +ordLen := len(ord) +cordLen := *(*C.int)(unsafe.Pointer(&ordLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_linalg_norm1(ptr, self, cord, cordLen, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func 
AtgLinalgNormOut(ptr *Ctensor, out Ctensor, self Ctensor, ord Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_linalg_norm_out(ptr, out, self, ord , cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgLinalgNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, ord string, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cord := C.CString(ord) +ordLen := len(ord) +cordLen := *(*C.int)(unsafe.Pointer(&ordLen)) +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_linalg_norm_out1(ptr, out, self, cord, cordLen, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ +C.atg_linear(ptr, input, weight, bias) +} +func AtgLinspace(ptr *Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int, optionsKind int32, optionsDevice int32){ +cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal)) +cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_linspace(ptr, start , end , cstepsVal, cstepsNull, coptionsKind, coptionsDevice) +} +func AtgLinspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int){ +cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal)) +cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull)) +C.atg_linspace_out(ptr, out, start , end , cstepsVal, cstepsNull) +} +func AtgLog(ptr *Ctensor, self Ctensor){ +C.atg_log(ptr, self) +} +func AtgLog10(ptr *Ctensor, self Ctensor){ +C.atg_log10(ptr, self) +} +func AtgLog10_(ptr *Ctensor, self Ctensor){ +C.atg_log10_(ptr, self) +} +func AtgLog10Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log10_out(ptr, out, self) +} +func AtgLog1p(ptr *Ctensor, self Ctensor){ +C.atg_log1p(ptr, self) +} +func AtgLog1p_(ptr *Ctensor, self Ctensor){ +C.atg_log1p_(ptr, self) +} +func AtgLog1pOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log1p_out(ptr, out, self) +} +func AtgLog2(ptr *Ctensor, self Ctensor){ +C.atg_log2(ptr, self) +} +func AtgLog2_(ptr *Ctensor, self Ctensor){ +C.atg_log2_(ptr, self) +} +func AtgLog2Out(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log2_out(ptr, out, self) +} +func AtgLog_(ptr *Ctensor, self Ctensor){ +C.atg_log_(ptr, self) +} +func AtgLogNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +cstd := *(*C.double)(unsafe.Pointer(&std)) +C.atg_log_normal_(ptr, self, cmean, cstd) +} +func AtgLogOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log_out(ptr, out, self) +} +func AtgLogSigmoid(ptr *Ctensor, self Ctensor){ +C.atg_log_sigmoid(ptr, self) +} +func AtgLogSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){ +C.atg_log_sigmoid_backward(ptr, gradOutput, self, buffer) +} +func AtgLogSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, buffer Ctensor){ +C.atg_log_sigmoid_backward_out(ptr, gradInput, gradOutput, self, buffer) +} +func AtgLogSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_log_sigmoid_out(ptr, out, self) +} +func AtgLogSoftmax(ptr *Ctensor, self Ctensor, dim int64, 
dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_log_softmax(ptr, self, cdim, cdtype) +} +func AtgLogaddexp(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logaddexp(ptr, self, other) +} +func AtgLogaddexp2(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logaddexp2(ptr, self, other) +} +func AtgLogaddexp2Out(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logaddexp2_out(ptr, out, self, other) +} +func AtgLogaddexpOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logaddexp_out(ptr, out, self, other) +} +func AtgLogcumsumexp(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_logcumsumexp(ptr, self, cdim) +} +func AtgLogcumsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_logcumsumexp_out(ptr, out, self, cdim) +} +func AtgLogdet(ptr *Ctensor, self Ctensor){ +C.atg_logdet(ptr, self) +} +func AtgLogicalAnd(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_and(ptr, self, other) +} +func AtgLogicalAnd_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_and_(ptr, self, other) +} +func AtgLogicalAndOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_and_out(ptr, out, self, other) +} +func AtgLogicalNot(ptr *Ctensor, self Ctensor){ +C.atg_logical_not(ptr, self) +} +func AtgLogicalNot_(ptr *Ctensor, self Ctensor){ +C.atg_logical_not_(ptr, self) +} +func AtgLogicalNotOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_logical_not_out(ptr, out, self) +} +func AtgLogicalOr(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_or(ptr, self, other) +} +func AtgLogicalOr_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_or_(ptr, self, other) +} +func AtgLogicalOrOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_or_out(ptr, out, self, other) +} +func AtgLogicalXor(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_xor(ptr, self, other) +} +func AtgLogicalXor_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_xor_(ptr, self, other) +} +func AtgLogicalXorOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_logical_xor_out(ptr, out, self, other) +} +func AtgLogit(ptr *Ctensor, self Ctensor, epsVal float64, epsNull int){ +cepsVal := *(*C.double)(unsafe.Pointer(&epsVal)) +cepsNull := *(*C.uint8_t)(unsafe.Pointer(&epsNull)) +C.atg_logit(ptr, self, cepsVal, cepsNull) +} +func AtgLogit_(ptr *Ctensor, self Ctensor, epsVal float64, epsNull int){ +cepsVal := *(*C.double)(unsafe.Pointer(&epsVal)) +cepsNull := *(*C.uint8_t)(unsafe.Pointer(&epsNull)) +C.atg_logit_(ptr, self, cepsVal, cepsNull) +} +func AtgLogitBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, epsVal float64, epsNull int){ +cepsVal := *(*C.double)(unsafe.Pointer(&epsVal)) +cepsNull := *(*C.uint8_t)(unsafe.Pointer(&epsNull)) +C.atg_logit_backward(ptr, gradOutput, self, cepsVal, cepsNull) +} +func AtgLogitBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, epsVal float64, epsNull int){ +cepsVal := *(*C.double)(unsafe.Pointer(&epsVal)) +cepsNull := *(*C.uint8_t)(unsafe.Pointer(&epsNull)) +C.atg_logit_backward_out(ptr, gradInput, gradOutput, self, cepsVal, cepsNull) +} +func AtgLogitOut(ptr *Ctensor, out Ctensor, self Ctensor, epsVal float64, epsNull int){ +cepsVal := *(*C.double)(unsafe.Pointer(&epsVal)) +cepsNull := *(*C.uint8_t)(unsafe.Pointer(&epsNull)) +C.atg_logit_out(ptr, out, self, cepsVal, cepsNull) 
+}
+func AtgLogspace(ptr *Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int, base float64, optionsKind int32, optionsDevice int32){
+cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal))
+cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull))
+cbase := *(*C.double)(unsafe.Pointer(&base))
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+C.atg_logspace(ptr, start , end , cstepsVal, cstepsNull, cbase, coptionsKind, coptionsDevice)
+}
+func AtgLogspaceOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar, stepsVal int64, stepsNull int, base float64){
+cstepsVal := *(*C.int64_t)(unsafe.Pointer(&stepsVal))
+cstepsNull := *(*C.uint8_t)(unsafe.Pointer(&stepsNull))
+cbase := *(*C.double)(unsafe.Pointer(&base))
+C.atg_logspace_out(ptr, out, start , end , cstepsVal, cstepsNull, cbase)
+}
+func AtgLogsumexp(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){
+cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
+cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_logsumexp(ptr, self, cdimDataPtr, cdimLen, ckeepdim)
+}
+func AtgLogsumexpOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){
+cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
+cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_logsumexp_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim)
+}
+func AtgLstm(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
+chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))
+chxLen := *(*C.int)(unsafe.Pointer(&hxLen))
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+C.atg_lstm(ptr, input, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
+}
+func AtgLstm1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hxData []Ctensor, hxLen int, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
+chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))
+chxLen := *(*C.int)(unsafe.Pointer(&hxLen))
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+C.atg_lstm1(ptr, data, batchSizes, chxDataPtr, chxLen, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
+}
+func AtgLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
+chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0]))
+chxLen := *(*C.int)(unsafe.Pointer(&hxLen))
+C.atg_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh)
+}
+func 
AtgLstsq(ptr *Ctensor, self Ctensor, a Ctensor){ +C.atg_lstsq(ptr, self, a) +} +func AtgLstsqOut(ptr *Ctensor, x Ctensor, qr Ctensor, self Ctensor, a Ctensor){ +C.atg_lstsq_out(ptr, x, qr, self, a) +} +func AtgLt(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_lt(ptr, self, other ) +} +func AtgLt1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_lt1(ptr, self, other) +} +func AtgLt_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_lt_(ptr, self, other ) +} +func AtgLt1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_lt_1(ptr, self, other) +} +func AtgLtOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_lt_out(ptr, out, self, other ) +} +func AtgLtOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_lt_out1(ptr, out, self, other) +} +func AtgLuSolve(ptr *Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ +C.atg_lu_solve(ptr, self, lUData, lUPivots) +} +func AtgLuSolveOut(ptr *Ctensor, out Ctensor, self Ctensor, lUData Ctensor, lUPivots Ctensor){ +C.atg_lu_solve_out(ptr, out, self, lUData, lUPivots) +} +func AtgMarginRankingLoss(ptr *Ctensor, input1 Ctensor, input2 Ctensor, target Ctensor, margin float64, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_margin_ranking_loss(ptr, input1, input2, target, cmargin, creduction) +} +func AtgMaskedFill(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){ +C.atg_masked_fill(ptr, self, mask, value ) +} +func AtgMaskedFill1(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){ +C.atg_masked_fill1(ptr, self, mask, value) +} +func AtgMaskedFill_(ptr *Ctensor, self Ctensor, mask Ctensor, value Cscalar){ +C.atg_masked_fill_(ptr, self, mask, value ) +} +func AtgMaskedFill1_(ptr *Ctensor, self Ctensor, mask Ctensor, value Ctensor){ +C.atg_masked_fill_1(ptr, self, mask, value) +} +func AtgMaskedScatter(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){ +C.atg_masked_scatter(ptr, self, mask, source) +} +func AtgMaskedScatter_(ptr *Ctensor, self Ctensor, mask Ctensor, source Ctensor){ +C.atg_masked_scatter_(ptr, self, mask, source) +} +func AtgMaskedSelect(ptr *Ctensor, self Ctensor, mask Ctensor){ +C.atg_masked_select(ptr, self, mask) +} +func AtgMaskedSelectBackward(ptr *Ctensor, grad Ctensor, input Ctensor, mask Ctensor){ +C.atg_masked_select_backward(ptr, grad, input, mask) +} +func AtgMaskedSelectOut(ptr *Ctensor, out Ctensor, self Ctensor, mask Ctensor){ +C.atg_masked_select_out(ptr, out, self, mask) +} +func AtgMatmul(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_matmul(ptr, self, other) +} +func AtgMatmulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_matmul_out(ptr, out, self, other) +} +func AtgMatrixExp(ptr *Ctensor, self Ctensor){ +C.atg_matrix_exp(ptr, self) +} +func AtgMatrixExpBackward(ptr *Ctensor, self Ctensor, grad Ctensor){ +C.atg_matrix_exp_backward(ptr, self, grad) +} +func AtgMatrixPower(ptr *Ctensor, self Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_matrix_power(ptr, self, cn) +} +func AtgMatrixRank(ptr *Ctensor, self Ctensor, symmetric int32){ +csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) +C.atg_matrix_rank(ptr, self, csymmetric) +} +func AtgMatrixRank1(ptr *Ctensor, self Ctensor, tol float64, symmetric int32){ +ctol := *(*C.double)(unsafe.Pointer(&tol)) +csymmetric := *(*C.int)(unsafe.Pointer(&symmetric)) +C.atg_matrix_rank1(ptr, self, ctol, csymmetric) +} +func AtgMax(ptr *Ctensor, self Ctensor){ +C.atg_max(ptr, self) +} +func 
AtgMax1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_max1(ptr, self, other) +} +func AtgMax2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_max2(ptr, self, cdim, ckeepdim) +} +func AtgMaxOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_max_out(ptr, out, self, other) +} +func AtgMaxOut1(ptr *Ctensor, max Ctensor, maxValues Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_max_out1(ptr, max, maxValues, self, cdim, ckeepdim) +} +func AtgMaxPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool1dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool1d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2dWithIndices(ptr *Ctensor, self Ctensor, 
kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool2dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool2dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool2dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := 
(*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool2d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3dWithIndices(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d_with_indices(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxPool3dWithIndicesBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d_with_indices_backward(ptr, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool3dWithIndicesBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, 
strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32, indices Ctensor){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d_with_indices_backward_out(ptr, gradInput, gradOutput, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode, indices) +} +func AtgMaxPool3dWithIndicesOut(ptr *Ctensor, out Ctensor, indices Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_max_pool3d_with_indices_out(ptr, out, indices, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMaxUnpool2d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool2dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_max_unpool2d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMaxUnpool3d(ptr *Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen 
int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d(ptr, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d_backward(ptr, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d_backward_out(ptr, gradInput, gradOutput, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaxUnpool3dOut(ptr *Ctensor, out Ctensor, self Ctensor, indices Ctensor, outputSizeData []int64, outputSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_max_unpool3d_out(ptr, out, self, indices, coutputSizeDataPtr, coutputSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgMaximum(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_maximum(ptr, self, other) +} +func AtgMaximumOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_maximum_out(ptr, out, self, other) +} +func AtgMean(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_mean(ptr, self, cdtype) +} +func AtgMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_mean1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgMeanOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, 
keepdim int32, dtype int32){
+cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0]))
+cdimLen := *(*C.int)(unsafe.Pointer(&dimLen))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+cdtype := *(*C.int)(unsafe.Pointer(&dtype))
+C.atg_mean_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype)
+}
+func AtgMedian(ptr *Ctensor, self Ctensor){
+C.atg_median(ptr, self)
+}
+func AtgMedian1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_median1(ptr, self, cdim, ckeepdim)
+}
+func AtgMedianOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_median_out(ptr, values, indices, self, cdim, ckeepdim)
 }
-func AtgWhere1(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor) {
- C.atg_where1(ptr, condition, self, other)
+func AtgMin(ptr *Ctensor, self Ctensor){
+C.atg_min(ptr, self)
 }
-func AtgZero_(ptr *Ctensor, self Ctensor) {
- C.atg_zero_(ptr, self)
+func AtgMin1(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg_min1(ptr, self, other)
 }
-func AtgZeros(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32) {
- csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
- csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
- coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
- coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
- C.atg_zeros(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice)
+func AtgMin2(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_min2(ptr, self, cdim, ckeepdim)
 }
-func AtgZerosLike(ptr *Ctensor, self Ctensor) {
- C.atg_zeros_like(ptr, self)
+func AtgMinOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
+C.atg_min_out(ptr, out, self, other)
 }
-func AtgZerosOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int) {
- csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
- csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
- C.atg_zeros_out(ptr, out, csizeDataPtr, csizeLen)
+func AtgMinOut1(ptr *Ctensor, min Ctensor, minIndices Ctensor, self Ctensor, dim int64, keepdim int32){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim))
+C.atg_min_out1(ptr, min, minIndices, self, cdim, ckeepdim)
+}
+func AtgMinimum(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg_minimum(ptr, self, other)
+}
+func AtgMinimumOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){
+C.atg_minimum_out(ptr, out, self, other)
+}
+func AtgMiopenBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, exponentialAverageFactor float64, epsilon float64){
+ctraining := *(*C.int)(unsafe.Pointer(&training))
+cexponentialAverageFactor := *(*C.double)(unsafe.Pointer(&exponentialAverageFactor))
+cepsilon := *(*C.double)(unsafe.Pointer(&epsilon))
+C.atg_miopen_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cexponentialAverageFactor, cepsilon)
+}
+func AtgMiopenBatchNormBackward(ptr *Ctensor, input Ctensor, gradOutput Ctensor, weight Ctensor, runningMean Ctensor, runningVar Ctensor, saveMean Ctensor, saveVar Ctensor, epsilon float64){
+cepsilon := *(*C.double)(unsafe.Pointer(&epsilon))
+C.atg_miopen_batch_norm_backward(ptr, input, gradOutput, weight,
runningMean, runningVar, saveMean, saveVar, cepsilon) +} +func AtgMiopenConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionBackwardBias(ptr *Ctensor, gradOutput Ctensor){ +C.atg_miopen_convolution_backward_bias(ptr, gradOutput) +} +func AtgMiopenConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTranspose(ptr 
*Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_transpose(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTransposeBackwardInput(ptr *Ctensor, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_transpose_backward_input(ptr, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenConvolutionTransposeBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_convolution_transpose_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, 
dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_depthwise_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_depthwise_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenDepthwiseConvolutionBackwardWeight(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, benchmark int32, deterministic int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbenchmark := *(*C.int)(unsafe.Pointer(&benchmark)) +cdeterministic := *(*C.int)(unsafe.Pointer(&deterministic)) +C.atg_miopen_depthwise_convolution_backward_weight(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbenchmark, cdeterministic) +} +func AtgMiopenRnn(ptr *Ctensor, input Ctensor, weightData []Ctensor, weightLen int, weightStride0 int64, hx Ctensor, cx Ctensor, mode int64, hiddenSize int64, numLayers int64, batchFirst int32, dropout float64, train int32, bidirectional int32, batchSizesData []int64, batchSizesLen int, dropoutState Ctensor){ +cweightDataPtr := 
(*Ctensor)(unsafe.Pointer(&weightData[0])) +cweightLen := *(*C.int)(unsafe.Pointer(&weightLen)) +cweightStride0 := *(*C.int64_t)(unsafe.Pointer(&weightStride0)) +cmode := *(*C.int64_t)(unsafe.Pointer(&mode)) +chiddenSize := *(*C.int64_t)(unsafe.Pointer(&hiddenSize)) +cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers)) +cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst)) +cdropout := *(*C.double)(unsafe.Pointer(&dropout)) +ctrain := *(*C.int)(unsafe.Pointer(&train)) +cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional)) +cbatchSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&batchSizesData[0])) +cbatchSizesLen := *(*C.int)(unsafe.Pointer(&batchSizesLen)) +C.atg_miopen_rnn(ptr, input, cweightDataPtr, cweightLen, cweightStride0, hx, cx, cmode, chiddenSize, cnumLayers, cbatchFirst, cdropout, ctrain, cbidirectional, cbatchSizesDataPtr, cbatchSizesLen, dropoutState) +} +func AtgMkldnnAdaptiveAvgPool2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +C.atg_mkldnn_adaptive_avg_pool2d(ptr, self, coutputSizeDataPtr, coutputSizeLen) +} +func AtgMkldnnConvolution(ptr *Ctensor, self Ctensor, weight Ctensor, bias Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_mkldnn_convolution(ptr, self, weight, bias, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgMkldnnConvolutionBackwardInput(ptr *Ctensor, selfSizeData []int64, selfSizeLen int, gradOutput Ctensor, weight Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ +cselfSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&selfSizeData[0])) +cselfSizeLen := *(*C.int)(unsafe.Pointer(&selfSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) +C.atg_mkldnn_convolution_backward_input(ptr, cselfSizeDataPtr, cselfSizeLen, gradOutput, weight, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) +} +func AtgMkldnnConvolutionBackwardWeights(ptr *Ctensor, weightSizeData []int64, weightSizeLen int, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64, biasDefined int32){ +cweightSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&weightSizeData[0])) +cweightSizeLen := *(*C.int)(unsafe.Pointer(&weightSizeLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) 
+cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +cbiasDefined := *(*C.int)(unsafe.Pointer(&biasDefined)) +C.atg_mkldnn_convolution_backward_weights(ptr, cweightSizeDataPtr, cweightSizeLen, gradOutput, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups, cbiasDefined) +} +func AtgMkldnnLinear(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor){ +C.atg_mkldnn_linear(ptr, input, weight, bias) +} +func AtgMkldnnMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_mkldnn_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMkldnnMaxPool3d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_mkldnn_max_pool3d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgMkldnnReorderConv2dWeight(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData []int64, dilationLen int, groups int64){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_mkldnn_reorder_conv2d_weight(ptr, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgMkldnnReorderConv3dWeight(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int, strideData []int64, strideLen int, dilationData 
[]int64, dilationLen int, groups int64){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cgroups := *(*C.int64_t)(unsafe.Pointer(&groups)) +C.atg_mkldnn_reorder_conv3d_weight(ptr, self, cpaddingDataPtr, cpaddingLen, cstrideDataPtr, cstrideLen, cdilationDataPtr, cdilationLen, cgroups) +} +func AtgMm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_mm(ptr, self, mat2) +} +func AtgMmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_mm_out(ptr, out, self, mat2) +} +func AtgMode(ptr *Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_mode(ptr, self, cdim, ckeepdim) +} +func AtgModeOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_mode_out(ptr, values, indices, self, cdim, ckeepdim) +} +func AtgMovedim(ptr *Ctensor, self Ctensor, sourceData []int64, sourceLen int, destinationData []int64, destinationLen int){ +csourceDataPtr := (*C.int64_t)(unsafe.Pointer(&sourceData[0])) +csourceLen := *(*C.int)(unsafe.Pointer(&sourceLen)) +cdestinationDataPtr := (*C.int64_t)(unsafe.Pointer(&destinationData[0])) +cdestinationLen := *(*C.int)(unsafe.Pointer(&destinationLen)) +C.atg_movedim(ptr, self, csourceDataPtr, csourceLen, cdestinationDataPtr, cdestinationLen) +} +func AtgMovedim1(ptr *Ctensor, self Ctensor, source int64, destination int64){ +csource := *(*C.int64_t)(unsafe.Pointer(&source)) +cdestination := *(*C.int64_t)(unsafe.Pointer(&destination)) +C.atg_movedim1(ptr, self, csource, cdestination) +} +func AtgMseLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss(ptr, self, target, creduction) +} +func AtgMseLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgMseLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgMseLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_mse_loss_out(ptr, out, self, target, creduction) +} +func AtgMul(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_mul(ptr, self, other) +} +func AtgMul1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_mul1(ptr, self, other ) +} +func AtgMul_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_mul_(ptr, self, other) +} +func AtgMul1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_mul_1(ptr, self, other ) +} +func AtgMulOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_mul_out(ptr, out, self, other) +} +func AtgMultiMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64){ +creduction := 
*(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multi_margin_loss_backward(ptr, gradOutput, self, target, p , margin , weight, creduction) +} +func AtgMultiMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, p Cscalar, margin Cscalar, weight Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multi_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, p , margin , weight, creduction) +} +func AtgMultilabelMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss(ptr, self, target, creduction) +} +func AtgMultilabelMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss_backward(ptr, gradOutput, self, target, creduction, isTarget) +} +func AtgMultilabelMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, isTarget Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction, isTarget) +} +func AtgMultilabelMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_multilabel_margin_loss_out(ptr, out, self, target, creduction) +} +func AtgMultinomial(ptr *Ctensor, self Ctensor, numSamples int64, replacement int32){ +cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) +creplacement := *(*C.int)(unsafe.Pointer(&replacement)) +C.atg_multinomial(ptr, self, cnumSamples, creplacement) +} +func AtgMultinomialOut(ptr *Ctensor, out Ctensor, self Ctensor, numSamples int64, replacement int32){ +cnumSamples := *(*C.int64_t)(unsafe.Pointer(&numSamples)) +creplacement := *(*C.int)(unsafe.Pointer(&replacement)) +C.atg_multinomial_out(ptr, out, self, cnumSamples, creplacement) +} +func AtgMultiply(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_multiply(ptr, self, other) +} +func AtgMultiply1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_multiply1(ptr, self, other ) +} +func AtgMultiply_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_multiply_(ptr, self, other) +} +func AtgMultiply1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_multiply_1(ptr, self, other ) +} +func AtgMultiplyOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_multiply_out(ptr, out, self, other) +} +func AtgMv(ptr *Ctensor, self Ctensor, vec Ctensor){ +C.atg_mv(ptr, self, vec) +} +func AtgMvOut(ptr *Ctensor, out Ctensor, self Ctensor, vec Ctensor){ +C.atg_mv_out(ptr, out, self, vec) +} +func AtgMvlgamma(ptr *Ctensor, self Ctensor, p int64){ +cp := *(*C.int64_t)(unsafe.Pointer(&p)) +C.atg_mvlgamma(ptr, self, cp) +} +func AtgMvlgamma_(ptr *Ctensor, self Ctensor, p int64){ +cp := *(*C.int64_t)(unsafe.Pointer(&p)) +C.atg_mvlgamma_(ptr, self, cp) +} +func AtgNanquantile(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ +cq := *(*C.double)(unsafe.Pointer(&q)) +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nanquantile(ptr, self, cq, cdimVal, cdimNull, ckeepdim) +} +func AtgNanquantile1(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, 
keepdim int32){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nanquantile1(ptr, self, q, cdimVal, cdimNull, ckeepdim) +} +func AtgNanquantileOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ +cq := *(*C.double)(unsafe.Pointer(&q)) +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nanquantile_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim) +} +func AtgNanquantileOut1(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nanquantile_out1(ptr, out, self, q, cdimVal, cdimNull, ckeepdim) +} +func AtgNansum(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_nansum(ptr, self, cdtype) +} +func AtgNansum1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_nansum1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNansumOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_nansum_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNarrow(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstart := *(*C.int64_t)(unsafe.Pointer(&start)) +clength := *(*C.int64_t)(unsafe.Pointer(&length)) +C.atg_narrow(ptr, self, cdim, cstart, clength) +} +func AtgNarrow1(ptr *Ctensor, self Ctensor, dim int64, start Ctensor, length int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +clength := *(*C.int64_t)(unsafe.Pointer(&length)) +C.atg_narrow1(ptr, self, cdim, start, clength) +} +func AtgNarrowCopy(ptr *Ctensor, self Ctensor, dim int64, start int64, length int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstart := *(*C.int64_t)(unsafe.Pointer(&start)) +clength := *(*C.int64_t)(unsafe.Pointer(&length)) +C.atg_narrow_copy(ptr, self, cdim, cstart, clength) +} +func AtgNativeBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_native_batch_norm(ptr, input, weight, bias, runningMean, runningVar, ctraining, cmomentum, ceps) +} +func AtgNativeBatchNormOut(ptr *Ctensor, out Ctensor, saveMean Ctensor, saveInvstd Ctensor, input Ctensor, weight Ctensor, bias Ctensor, runningMean Ctensor, runningVar Ctensor, training int32, momentum float64, eps float64){ +ctraining := *(*C.int)(unsafe.Pointer(&training)) +cmomentum := *(*C.double)(unsafe.Pointer(&momentum)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_native_batch_norm_out(ptr, out, saveMean, saveInvstd, input, weight, 
bias, runningMean, runningVar, ctraining, cmomentum, ceps) +} +func AtgNativeGroupNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, n int64, c int64, hxW int64, group int64, eps float64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +cc := *(*C.int64_t)(unsafe.Pointer(&c)) +chxW := *(*C.int64_t)(unsafe.Pointer(&hxW)) +cgroup := *(*C.int64_t)(unsafe.Pointer(&group)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_native_group_norm(ptr, input, weight, bias, cn, cc, chxW, cgroup, ceps) +} +func AtgNativeLayerNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, m int64, n int64, eps float64){ +cm := *(*C.int64_t)(unsafe.Pointer(&m)) +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +C.atg_native_layer_norm(ptr, input, weight, bias, cm, cn, ceps) +} +func AtgNativeNorm(ptr *Ctensor, self Ctensor){ +C.atg_native_norm(ptr, self) +} +func AtgNativeNorm1(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_native_norm1(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNe(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ne(ptr, self, other ) +} +func AtgNe1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ne1(ptr, self, other) +} +func AtgNe_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_ne_(ptr, self, other ) +} +func AtgNe1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_ne_1(ptr, self, other) +} +func AtgNeOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_ne_out(ptr, out, self, other ) +} +func AtgNeOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_ne_out1(ptr, out, self, other) +} +func AtgNeg(ptr *Ctensor, self Ctensor){ +C.atg_neg(ptr, self) +} +func AtgNeg_(ptr *Ctensor, self Ctensor){ +C.atg_neg_(ptr, self) +} +func AtgNegOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_neg_out(ptr, out, self) +} +func AtgNegative(ptr *Ctensor, self Ctensor){ +C.atg_negative(ptr, self) +} +func AtgNegative_(ptr *Ctensor, self Ctensor){ +C.atg_negative_(ptr, self) +} +func AtgNegativeOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_negative_out(ptr, out, self) +} +func AtgNewEmpty(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_new_empty(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgNewFull(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, fillValue Cscalar, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_new_full(ptr, self, csizeDataPtr, csizeLen, fillValue , coptionsKind, coptionsDevice) +} +func AtgNewZeros(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := 
*(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_new_zeros(ptr, self, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgNextafter(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_nextafter(ptr, self, other) +} +func AtgNextafter_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_nextafter_(ptr, self, other) +} +func AtgNextafterOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_nextafter_out(ptr, out, self, other) +} +func AtgNllLoss(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss(ptr, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLoss2d(ptr *Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d(ptr, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLoss2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLoss2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLoss2dOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss2d_out(ptr, out, self, target, weight, creduction, cignoreIndex) +} +func AtgNllLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss_backward(ptr, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64, totalWeight Ctensor){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss_backward_out(ptr, gradInput, gradOutput, self, target, weight, creduction, cignoreIndex, totalWeight) +} +func AtgNllLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, weight Ctensor, reduction int64, ignoreIndex int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cignoreIndex := *(*C.int64_t)(unsafe.Pointer(&ignoreIndex)) +C.atg_nll_loss_out(ptr, out, self, target, weight, creduction, cignoreIndex) +} +func AtgNonzero(ptr *Ctensor, self Ctensor){ +C.atg_nonzero(ptr, self) +} + +func AtgNonzeroOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_nonzero_out(ptr, out, self) +} +func 
AtgNorm(ptr *Ctensor, self Ctensor){ +C.atg_norm(ptr, self) +} +func AtgNorm1(ptr *Ctensor, self Ctensor, p Cscalar, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_norm1(ptr, self, p , cdtype) +} +func AtgNorm2(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_norm2(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNorm3(ptr *Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_norm3(ptr, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNormExceptDim(ptr *Ctensor, v Ctensor, pow int64, dim int64){ +cpow := *(*C.int64_t)(unsafe.Pointer(&pow)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_norm_except_dim(ptr, v, cpow, cdim) +} +func AtgNormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_norm_out(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_norm_out1(ptr, out, self, p , cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgNormal_(ptr *Ctensor, self Ctensor, mean float64, std float64){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +cstd := *(*C.double)(unsafe.Pointer(&std)) +C.atg_normal_(ptr, self, cmean, cstd) +} +func AtgNormalOut(ptr *Ctensor, out Ctensor, mean Ctensor, std float64){ +cstd := *(*C.double)(unsafe.Pointer(&std)) +C.atg_normal_out(ptr, out, mean, cstd) +} +func AtgNormalOut1(ptr *Ctensor, out Ctensor, mean float64, std Ctensor){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +C.atg_normal_out1(ptr, out, cmean, std) +} +func AtgNormalOut2(ptr *Ctensor, out Ctensor, mean Ctensor, std Ctensor){ +C.atg_normal_out2(ptr, out, mean, std) +} +func AtgNormalOut3(ptr *Ctensor, out Ctensor, mean float64, std float64, sizeData []int64, sizeLen int){ +cmean := *(*C.double)(unsafe.Pointer(&mean)) +cstd := *(*C.double)(unsafe.Pointer(&std)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_normal_out3(ptr, out, cmean, cstd, csizeDataPtr, csizeLen) +} +func AtgNotEqual(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_not_equal(ptr, self, other ) +} +func AtgNotEqual1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_not_equal1(ptr, self, other) +} +func AtgNotEqual_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_not_equal_(ptr, self, other ) +} +func AtgNotEqual1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_not_equal_1(ptr, self, other) +} +func AtgNotEqualOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_not_equal_out(ptr, out, self, other ) +} +func AtgNotEqualOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_not_equal_out1(ptr, out, self, other) +} 
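The generated bindings above all follow the same two calling conventions: a Go `[]int64` argument is handed to C as a pointer to its first element plus a length reinterpreted as a `C.int`, and a nullable scalar argument (see the `dimVal int64, dimNull int` pairs in `AtgQuantile`/`AtgNanquantile`) is encoded as a value plus a null flag. The standalone cgo sketch below is not part of gotch, and the C helper `sum_or_default` is invented purely for illustration; only the calling conventions mirror the generated code.

```go
package main

/*
#include <stdint.h>

// Invented helper: sums `len` int64 values, or returns `def` when `null` is set.
static int64_t sum_or_default(const int64_t *data, int len, int64_t def, uint8_t null) {
	if (null) {
		return def;
	}
	int64_t s = 0;
	for (int i = 0; i < len; i++) {
		s += data[i];
	}
	return s;
}
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	dims := []int64{1, 2, 3}
	dimsLen := len(dims)

	// Slice convention used by the wrappers: pointer to the first element,
	// length reinterpreted as a C int (a plain C.int(dimsLen) conversion
	// would also work; the generated code uses the unsafe.Pointer form).
	cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dims[0]))
	cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))

	// Nullable-scalar convention (cf. dimVal/dimNull): a value plus a
	// uint8 flag telling the C side whether the value should be treated as null.
	var dimVal int64 = -1
	var dimNull uint8 = 0
	cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal))
	cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull))

	fmt.Println(C.sum_or_default(cdimsDataPtr, cdimsLen, cdimVal, cdimNull)) // prints 6
}
```

In the wrappers themselves the leading `ptr *Ctensor` argument appears to be the slot into which the `atg_*` C function writes its result tensor handle(s); the higher-level tensor API is what allocates that slot and calls these `Atg*` functions.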
+func AtgNuclearNorm(ptr *Ctensor, self Ctensor, keepdim int32){ +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm(ptr, self, ckeepdim) +} +func AtgNuclearNorm1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm1(ptr, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNuclearNormOut(ptr *Ctensor, out Ctensor, self Ctensor, keepdim int32){ +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm_out(ptr, out, self, ckeepdim) +} +func AtgNuclearNormOut1(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_nuclear_norm_out1(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim) +} +func AtgNumpyT(ptr *Ctensor, self Ctensor){ +C.atg_numpy_t(ptr, self) +} +func AtgOneHot(ptr *Ctensor, self Ctensor, numClasses int64){ +cnumClasses := *(*C.int64_t)(unsafe.Pointer(&numClasses)) +C.atg_one_hot(ptr, self, cnumClasses) +} +func AtgOnes(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_ones(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgOnesLike(ptr *Ctensor, self Ctensor){ +C.atg_ones_like(ptr, self) +} +func AtgOnesOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_ones_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgOrgqr(ptr *Ctensor, self Ctensor, input2 Ctensor){ +C.atg_orgqr(ptr, self, input2) +} +func AtgOrgqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor){ +C.atg_orgqr_out(ptr, out, self, input2) +} +func AtgOrmqr(ptr *Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){ +cleft := *(*C.int)(unsafe.Pointer(&left)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +C.atg_ormqr(ptr, self, input2, input3, cleft, ctranspose) +} +func AtgOrmqrOut(ptr *Ctensor, out Ctensor, self Ctensor, input2 Ctensor, input3 Ctensor, left int32, transpose int32){ +cleft := *(*C.int)(unsafe.Pointer(&left)) +ctranspose := *(*C.int)(unsafe.Pointer(&transpose)) +C.atg_ormqr_out(ptr, out, self, input2, input3, cleft, ctranspose) +} +func AtgOuter(ptr *Ctensor, self Ctensor, vec2 Ctensor){ +C.atg_outer(ptr, self, vec2) +} +func AtgOuterOut(ptr *Ctensor, out Ctensor, self Ctensor, vec2 Ctensor){ +C.atg_outer_out(ptr, out, self, vec2) +} +func AtgPairwiseDistance(ptr *Ctensor, x1 Ctensor, x2 Ctensor, p float64, eps float64, keepdim int32){ +cp := *(*C.double)(unsafe.Pointer(&p)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_pairwise_distance(ptr, x1, x2, cp, ceps, ckeepdim) +} +func AtgPdist(ptr *Ctensor, self Ctensor, p float64){ +cp := *(*C.double)(unsafe.Pointer(&p)) +C.atg_pdist(ptr, self, cp) +} +func AtgPermute(ptr *Ctensor, self Ctensor, dimsData []int64, dimsLen int){ +cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0])) +cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen)) 
+C.atg_permute(ptr, self, cdimsDataPtr, cdimsLen) +} +func AtgPinMemory(ptr *Ctensor, self Ctensor){ +C.atg_pin_memory(ptr, self) +} +func AtgPinverse(ptr *Ctensor, self Ctensor, rcond float64){ +crcond := *(*C.double)(unsafe.Pointer(&rcond)) +C.atg_pinverse(ptr, self, crcond) +} +func AtgPixelShuffle(ptr *Ctensor, self Ctensor, upscaleFactor int64){ +cupscaleFactor := *(*C.int64_t)(unsafe.Pointer(&upscaleFactor)) +C.atg_pixel_shuffle(ptr, self, cupscaleFactor) +} +func AtgPoisson(ptr *Ctensor, self Ctensor){ +C.atg_poisson(ptr, self) +} +func AtgPoissonNllLoss(ptr *Ctensor, input Ctensor, target Ctensor, logInput int32, full int32, eps float64, reduction int64){ +clogInput := *(*C.int)(unsafe.Pointer(&logInput)) +cfull := *(*C.int)(unsafe.Pointer(&full)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_poisson_nll_loss(ptr, input, target, clogInput, cfull, ceps, creduction) +} +func AtgPolar(ptr *Ctensor, abs Ctensor, angle Ctensor){ +C.atg_polar(ptr, abs, angle) +} +func AtgPolarOut(ptr *Ctensor, out Ctensor, abs Ctensor, angle Ctensor){ +C.atg_polar_out(ptr, out, abs, angle) +} +func AtgPolygamma(ptr *Ctensor, n int64, self Ctensor){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_polygamma(ptr, cn, self) +} +func AtgPolygamma_(ptr *Ctensor, self Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_polygamma_(ptr, self, cn) +} +func AtgPolygammaOut(ptr *Ctensor, out Ctensor, n int64, self Ctensor){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_polygamma_out(ptr, out, cn, self) +} +func AtgPow(ptr *Ctensor, self Ctensor, exponent Cscalar){ +C.atg_pow(ptr, self, exponent ) +} +func AtgPow1(ptr *Ctensor, self Ctensor, exponent Ctensor){ +C.atg_pow1(ptr, self, exponent) +} +func AtgPow2(ptr *Ctensor, selfScalar Cscalar, exponent Ctensor){ +C.atg_pow2(ptr, selfScalar , exponent) +} +func AtgPow_(ptr *Ctensor, self Ctensor, exponent Cscalar){ +C.atg_pow_(ptr, self, exponent ) +} +func AtgPow1_(ptr *Ctensor, self Ctensor, exponent Ctensor){ +C.atg_pow_1(ptr, self, exponent) +} +func AtgPowOut(ptr *Ctensor, out Ctensor, self Ctensor, exponent Cscalar){ +C.atg_pow_out(ptr, out, self, exponent ) +} +func AtgPowOut1(ptr *Ctensor, out Ctensor, self Ctensor, exponent Ctensor){ +C.atg_pow_out1(ptr, out, self, exponent) +} +func AtgPowOut2(ptr *Ctensor, out Ctensor, selfScalar Cscalar, exponent Ctensor){ +C.atg_pow_out2(ptr, out, selfScalar , exponent) +} +func AtgPrelu(ptr *Ctensor, self Ctensor, weight Ctensor){ +C.atg_prelu(ptr, self, weight) +} +func AtgPreluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, weight Ctensor){ +C.atg_prelu_backward(ptr, gradOutput, self, weight) +} +func AtgProd(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_prod(ptr, self, cdtype) +} +func AtgProd1(ptr *Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_prod1(ptr, self, cdim, ckeepdim, cdtype) +} +func AtgProdOut(ptr *Ctensor, out Ctensor, self Ctensor, dim int64, keepdim int32, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_prod_out(ptr, out, self, cdim, ckeepdim, cdtype) +} +func AtgPut_(ptr *Ctensor, self Ctensor, index Ctensor, source Ctensor, accumulate int32){ +caccumulate := *(*C.int)(unsafe.Pointer(&accumulate)) 
+C.atg_put_(ptr, self, index, source, caccumulate) +} +func AtgQPerChannelScales(ptr *Ctensor, self Ctensor){ +C.atg_q_per_channel_scales(ptr, self) +} +func AtgQPerChannelZeroPoints(ptr *Ctensor, self Ctensor){ +C.atg_q_per_channel_zero_points(ptr, self) +} +func AtgQr(ptr *Ctensor, self Ctensor, some int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +C.atg_qr(ptr, self, csome) +} +func AtgQrOut(ptr *Ctensor, q Ctensor, r Ctensor, self Ctensor, some int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +C.atg_qr_out(ptr, q, r, self, csome) +} +func AtgQuantile(ptr *Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ +cq := *(*C.double)(unsafe.Pointer(&q)) +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_quantile(ptr, self, cq, cdimVal, cdimNull, ckeepdim) +} +func AtgQuantile1(ptr *Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_quantile1(ptr, self, q, cdimVal, cdimNull, ckeepdim) +} +func AtgQuantileOut(ptr *Ctensor, out Ctensor, self Ctensor, q float64, dimVal int64, dimNull int, keepdim int32){ +cq := *(*C.double)(unsafe.Pointer(&q)) +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_quantile_out(ptr, out, self, cq, cdimVal, cdimNull, ckeepdim) +} +func AtgQuantileOut1(ptr *Ctensor, out Ctensor, self Ctensor, q Ctensor, dimVal int64, dimNull int, keepdim int32){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_quantile_out1(ptr, out, self, q, cdimVal, cdimNull, ckeepdim) +} +func AtgQuantizePerChannel(ptr *Ctensor, self Ctensor, scales Ctensor, zeroPoints Ctensor, axis int64, dtype int32){ +caxis := *(*C.int64_t)(unsafe.Pointer(&axis)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_quantize_per_channel(ptr, self, scales, zeroPoints, caxis, cdtype) +} +func AtgQuantizePerTensor(ptr *Ctensor, self Ctensor, scale float64, zeroPoint int64, dtype int32){ +cscale := *(*C.double)(unsafe.Pointer(&scale)) +czeroPoint := *(*C.int64_t)(unsafe.Pointer(&zeroPoint)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_quantize_per_tensor(ptr, self, cscale, czeroPoint, cdtype) +} + +func AtgQuantizedBatchNorm(ptr *Ctensor, input Ctensor, weight Ctensor, bias Ctensor, mean Ctensor, vari Ctensor, eps float64, outputScale float64, outputZeroPoint int64){ +ceps := *(*C.double)(unsafe.Pointer(&eps)) +coutputScale := *(*C.double)(unsafe.Pointer(&outputScale)) +coutputZeroPoint := *(*C.int64_t)(unsafe.Pointer(&outputZeroPoint)) +C.atg_quantized_batch_norm(ptr, input, weight, bias, mean, vari, ceps, coutputScale, coutputZeroPoint) +} +func AtgQuantizedGruCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +C.atg_quantized_gru_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgQuantizedLstmCell(ptr *Ctensor, input Ctensor, hxData []Ctensor, hxLen int, wIh Ctensor, wHh Ctensor, 
bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +chxDataPtr := (*Ctensor)(unsafe.Pointer(&hxData[0])) +chxLen := *(*C.int)(unsafe.Pointer(&hxLen)) +C.atg_quantized_lstm_cell(ptr, input, chxDataPtr, chxLen, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgQuantizedMaxPool1d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_quantized_max_pool1d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgQuantizedMaxPool2d(ptr *Ctensor, self Ctensor, kernelSizeData []int64, kernelSizeLen int, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int, ceilMode int32){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +cceilMode := *(*C.int)(unsafe.Pointer(&ceilMode)) +C.atg_quantized_max_pool2d(ptr, self, ckernelSizeDataPtr, ckernelSizeLen, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen, cceilMode) +} +func AtgQuantizedRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +C.atg_quantized_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgQuantizedRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor, packedIh Ctensor, packedHh Ctensor, colOffsetsIh Ctensor, colOffsetsHh Ctensor, scaleIh Cscalar, scaleHh Cscalar, zeroPointIh Cscalar, zeroPointHh Cscalar){ +C.atg_quantized_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh , scaleHh , zeroPointIh , zeroPointHh ) +} +func AtgRad2deg(ptr *Ctensor, self Ctensor){ +C.atg_rad2deg(ptr, self) +} +func AtgRad2deg_(ptr *Ctensor, self Ctensor){ +C.atg_rad2deg_(ptr, self) +} +func AtgRad2degOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_rad2deg_out(ptr, out, self) +} +func AtgRand(ptr *Ctensor, sizeData 
[]int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_rand(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandLike(ptr *Ctensor, self Ctensor){ +C.atg_rand_like(ptr, self) +} +func AtgRandOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_rand_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgRandint(ptr *Ctensor, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randint(ptr, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandint1(ptr *Ctensor, low int64, high int64, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +clow := *(*C.int64_t)(unsafe.Pointer(&low)) +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randint1(ptr, clow, chigh, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandintLike(ptr *Ctensor, self Ctensor, high int64){ +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +C.atg_randint_like(ptr, self, chigh) +} +func AtgRandintLike1(ptr *Ctensor, self Ctensor, low int64, high int64){ +clow := *(*C.int64_t)(unsafe.Pointer(&low)) +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +C.atg_randint_like1(ptr, self, clow, chigh) +} +func AtgRandintOut(ptr *Ctensor, out Ctensor, high int64, sizeData []int64, sizeLen int){ +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_randint_out(ptr, out, chigh, csizeDataPtr, csizeLen) +} +func AtgRandintOut1(ptr *Ctensor, out Ctensor, low int64, high int64, sizeData []int64, sizeLen int){ +clow := *(*C.int64_t)(unsafe.Pointer(&low)) +chigh := *(*C.int64_t)(unsafe.Pointer(&high)) +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_randint_out1(ptr, out, clow, chigh, csizeDataPtr, csizeLen) +} +func AtgRandn(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randn(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgRandnLike(ptr *Ctensor, self Ctensor){ +C.atg_randn_like(ptr, self) +} +func AtgRandnOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_randn_out(ptr, out, csizeDataPtr, csizeLen) +} +func AtgRandom_(ptr *Ctensor, self Ctensor){ +C.atg_random_(ptr, self) +} +func 
AtgRandom1_(ptr *Ctensor, self Ctensor, to int64){ +cto := *(*C.int64_t)(unsafe.Pointer(&to)) +C.atg_random_1(ptr, self, cto) +} +func AtgRandom2(ptr *Ctensor, self Ctensor, from int64, toVal int64, toNull int){ +cfrom := *(*C.int64_t)(unsafe.Pointer(&from)) +ctoVal := *(*C.int64_t)(unsafe.Pointer(&toVal)) +ctoNull := *(*C.uint8_t)(unsafe.Pointer(&toNull)) +C.atg_random_2(ptr, self, cfrom, ctoVal, ctoNull) +} +func AtgRandperm(ptr *Ctensor, n int64, optionsKind int32, optionsDevice int32){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_randperm(ptr, cn, coptionsKind, coptionsDevice) +} +func AtgRandpermOut(ptr *Ctensor, out Ctensor, n int64){ +cn := *(*C.int64_t)(unsafe.Pointer(&n)) +C.atg_randperm_out(ptr, out, cn) +} +func AtgRange(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_range(ptr, start , end , coptionsKind, coptionsDevice) +} +func AtgRange1(ptr *Ctensor, start Cscalar, end Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_range1(ptr, start , end , coptionsKind, coptionsDevice) +} +func AtgRangeOut(ptr *Ctensor, out Ctensor, start Cscalar, end Cscalar){ +C.atg_range_out(ptr, out, start , end ) +} +func AtgReal(ptr *Ctensor, self Ctensor){ +C.atg_real(ptr, self) +} +func AtgReciprocal(ptr *Ctensor, self Ctensor){ +C.atg_reciprocal(ptr, self) +} +func AtgReciprocal_(ptr *Ctensor, self Ctensor){ +C.atg_reciprocal_(ptr, self) +} +func AtgReciprocalOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_reciprocal_out(ptr, out, self) +} +func AtgReflectionPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ 
+cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReflectionPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_reflection_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgRelu(ptr *Ctensor, self Ctensor){ +C.atg_relu(ptr, self) +} +func AtgRelu_(ptr *Ctensor, self Ctensor){ +C.atg_relu_(ptr, self) +} +func AtgRemainder(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_remainder(ptr, self, other ) +} +func AtgRemainder1(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_remainder1(ptr, self, other) +} +func AtgRemainder_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_remainder_(ptr, self, other ) +} +func AtgRemainder1_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_remainder_1(ptr, self, other) +} +func AtgRemainderOut(ptr *Ctensor, out Ctensor, self Ctensor, other Cscalar){ +C.atg_remainder_out(ptr, out, self, other ) +} +func AtgRemainderOut1(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_remainder_out1(ptr, out, self, other) +} +func AtgRenorm(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_renorm(ptr, self, p , cdim, maxnorm ) +} +func AtgRenorm_(ptr *Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_renorm_(ptr, self, p , cdim, maxnorm ) +} +func AtgRenormOut(ptr *Ctensor, out Ctensor, self Ctensor, p Cscalar, dim int64, maxnorm Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_renorm_out(ptr, out, self, p , cdim, maxnorm ) +} +func AtgRepeat(ptr *Ctensor, self Ctensor, repeatsData []int64, repeatsLen int){ +crepeatsDataPtr := (*C.int64_t)(unsafe.Pointer(&repeatsData[0])) +crepeatsLen := *(*C.int)(unsafe.Pointer(&repeatsLen)) +C.atg_repeat(ptr, self, crepeatsDataPtr, crepeatsLen) +} +func AtgRepeatInterleave(ptr *Ctensor, repeats Ctensor){ +C.atg_repeat_interleave(ptr, repeats) +} +func AtgRepeatInterleave1(ptr *Ctensor, self Ctensor, repeats Ctensor, dimVal int64, dimNull int){ +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +C.atg_repeat_interleave1(ptr, self, repeats, cdimVal, cdimNull) +} +func AtgRepeatInterleave2(ptr *Ctensor, self Ctensor, repeats int64, dimVal int64, dimNull int){ +crepeats := *(*C.int64_t)(unsafe.Pointer(&repeats)) +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +C.atg_repeat_interleave2(ptr, self, crepeats, cdimVal, cdimNull) +} +func AtgReplicationPad1d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dBackward(ptr *Ctensor, gradOutput Ctensor, 
self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad1dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad1d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad2dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad2d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3d(ptr *Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d(ptr, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d_backward(ptr, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d_backward_out(ptr, gradInput, gradOutput, self, cpaddingDataPtr, cpaddingLen) +} +func AtgReplicationPad3dOut(ptr *Ctensor, out Ctensor, self Ctensor, paddingData []int64, paddingLen int){ +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_replication_pad3d_out(ptr, out, self, cpaddingDataPtr, cpaddingLen) +} +func AtgRequiresGrad_(ptr *Ctensor, 
self Ctensor, requiresGrad int32){
+crequiresGrad := *(*C.int)(unsafe.Pointer(&requiresGrad))
+C.atg_requires_grad_(ptr, self, crequiresGrad)
+}
+func AtgReshape(ptr *Ctensor, self Ctensor, shapeData []int64, shapeLen int){
+cshapeDataPtr := (*C.int64_t)(unsafe.Pointer(&shapeData[0]))
+cshapeLen := *(*C.int)(unsafe.Pointer(&shapeLen))
+C.atg_reshape(ptr, self, cshapeDataPtr, cshapeLen)
+}
+func AtgReshapeAs(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg_reshape_as(ptr, self, other)
+}
+func AtgResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){
+csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0]))
+csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen))
+C.atg_resize_(ptr, self, csizeDataPtr, csizeLen)
+}
+func AtgResizeAs_(ptr *Ctensor, self Ctensor, theTemplate Ctensor){
+C.atg_resize_as_(ptr, self, theTemplate)
+}
+func AtgRfft(ptr *Ctensor, self Ctensor, signalNdim int64, normalized int32, onesided int32){
+csignalNdim := *(*C.int64_t)(unsafe.Pointer(&signalNdim))
+cnormalized := *(*C.int)(unsafe.Pointer(&normalized))
+conesided := *(*C.int)(unsafe.Pointer(&onesided))
+C.atg_rfft(ptr, self, csignalNdim, cnormalized, conesided)
+}
+func AtgRnnRelu(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+C.atg_rnn_relu(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
+}
+func AtgRnnRelu1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+C.atg_rnn_relu1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
+}
+func AtgRnnReluCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
+C.atg_rnn_relu_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
+}
+func AtgRnnTanh(ptr *Ctensor, input Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32, batchFirst int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+cbatchFirst := *(*C.int)(unsafe.Pointer(&batchFirst))
+C.atg_rnn_tanh(ptr, input, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional, cbatchFirst)
+}
+func AtgRnnTanh1(ptr *Ctensor, data Ctensor, batchSizes Ctensor, hx Ctensor, paramsData []Ctensor, paramsLen int, hasBiases int32, numLayers int64, dropout float64, train int32, bidirectional int32){
+cparamsDataPtr := (*Ctensor)(unsafe.Pointer(&paramsData[0]))
+cparamsLen := *(*C.int)(unsafe.Pointer(&paramsLen))
+chasBiases := *(*C.int)(unsafe.Pointer(&hasBiases))
+cnumLayers := *(*C.int64_t)(unsafe.Pointer(&numLayers))
+cdropout := *(*C.double)(unsafe.Pointer(&dropout))
+ctrain := *(*C.int)(unsafe.Pointer(&train))
+cbidirectional := *(*C.int)(unsafe.Pointer(&bidirectional))
+C.atg_rnn_tanh1(ptr, data, batchSizes, hx, cparamsDataPtr, cparamsLen, chasBiases, cnumLayers, cdropout, ctrain, cbidirectional)
+}
+func AtgRnnTanhCell(ptr *Ctensor, input Ctensor, hx Ctensor, wIh Ctensor, wHh Ctensor, bIh Ctensor, bHh Ctensor){
+C.atg_rnn_tanh_cell(ptr, input, hx, wIh, wHh, bIh, bHh)
+}
+func AtgRoll(ptr *Ctensor, self Ctensor, shiftsData []int64, shiftsLen int, dimsData []int64, dimsLen int){
+cshiftsDataPtr := (*C.int64_t)(unsafe.Pointer(&shiftsData[0]))
+cshiftsLen := *(*C.int)(unsafe.Pointer(&shiftsLen))
+cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0]))
+cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))
+C.atg_roll(ptr, self, cshiftsDataPtr, cshiftsLen, cdimsDataPtr, cdimsLen)
+}
+func AtgRot90(ptr *Ctensor, self Ctensor, k int64, dimsData []int64, dimsLen int){
+ck := *(*C.int64_t)(unsafe.Pointer(&k))
+cdimsDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsData[0]))
+cdimsLen := *(*C.int)(unsafe.Pointer(&dimsLen))
+C.atg_rot90(ptr, self, ck, cdimsDataPtr, cdimsLen)
+}
+func AtgRound(ptr *Ctensor, self Ctensor){
+C.atg_round(ptr, self)
+}
+func AtgRound_(ptr *Ctensor, self Ctensor){
+C.atg_round_(ptr, self)
+}
+func AtgRoundOut(ptr *Ctensor, out Ctensor, self Ctensor){
+C.atg_round_out(ptr, out, self)
+}
+func AtgRrelu(ptr *Ctensor, self Ctensor, training int32){
+ctraining := *(*C.int)(unsafe.Pointer(&training))
+C.atg_rrelu(ptr, self, ctraining)
+}
+func AtgRrelu_(ptr *Ctensor, self Ctensor, training int32){
+ctraining := *(*C.int)(unsafe.Pointer(&training))
+C.atg_rrelu_(ptr, self, ctraining)
+}
+func AtgRreluWithNoise(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){
+ctraining := *(*C.int)(unsafe.Pointer(&training))
+C.atg_rrelu_with_noise(ptr, self, noise, ctraining)
+}
+func AtgRreluWithNoise_(ptr *Ctensor, self Ctensor, noise Ctensor, training int32){
+ctraining := *(*C.int)(unsafe.Pointer(&training))
+C.atg_rrelu_with_noise_(ptr, self, noise, ctraining)
+}
+func AtgRreluWithNoiseBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, noise Ctensor, lower Cscalar, upper Cscalar, training int32, selfIsResult int32){
+ctraining := *(*C.int)(unsafe.Pointer(&training))
+cselfIsResult := *(*C.int)(unsafe.Pointer(&selfIsResult))
+C.atg_rrelu_with_noise_backward(ptr, gradOutput, self, noise, lower , upper , ctraining, cselfIsResult)
+}
+func AtgRreluWithNoiseOut(ptr *Ctensor, out Ctensor, self Ctensor, noise Ctensor, training int32){
+ctraining := *(*C.int)(unsafe.Pointer(&training))
+C.atg_rrelu_with_noise_out(ptr, out, self, noise, ctraining)
+}
+func AtgRsqrt(ptr *Ctensor, self Ctensor){
+C.atg_rsqrt(ptr, self)
+}
+func AtgRsqrt_(ptr *Ctensor, self Ctensor){
+C.atg_rsqrt_(ptr, self)
+}
+func AtgRsqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){
+C.atg_rsqrt_out(ptr, out, self)
+}
+func AtgRsub(ptr *Ctensor, self Ctensor, other Ctensor){
+C.atg_rsub(ptr, self, other)
+}
+func AtgRsub1(ptr *Ctensor, self Ctensor, other Cscalar){
+C.atg_rsub1(ptr, self,
other ) +} +func AtgScalarTensor(ptr *Ctensor, s Cscalar, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_scalar_tensor(ptr, s , coptionsKind, coptionsDevice) +} +func AtgScatter(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter(ptr, self, cdim, index, src) +} +func AtgScatter1(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter1(ptr, self, cdim, index, value ) +} +func AtgScatter_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_(ptr, self, cdim, index, src) +} +func AtgScatter1_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_1(ptr, self, cdim, index, value ) +} +func AtgScatter2(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor, reduce string){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +creduce := C.CString(reduce) +reduceLen := len(reduce) +creduceLen := *(*C.int)(unsafe.Pointer(&reduceLen)) +C.atg_scatter_2(ptr, self, cdim, index, src, creduce, creduceLen) +} +func AtgScatter3(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, value Cscalar, reduce string){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +creduce := C.CString(reduce) +reduceLen := len(reduce) +creduceLen := *(*C.int)(unsafe.Pointer(&reduceLen)) +C.atg_scatter_3(ptr, self, cdim, index, value , creduce, creduceLen) +} +func AtgScatterAdd(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_add(ptr, self, cdim, index, src) +} +func AtgScatterAdd_(ptr *Ctensor, self Ctensor, dim int64, index Ctensor, src Ctensor){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_scatter_add_(ptr, self, cdim, index, src) +} +func AtgSearchsorted(ptr *Ctensor, sortedSequence Ctensor, self Ctensor, outInt32 int32, right int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +cright := *(*C.int)(unsafe.Pointer(&right)) +C.atg_searchsorted(ptr, sortedSequence, self, coutInt32, cright) +} +func AtgSearchsorted1(ptr *Ctensor, sortedSequence Ctensor, selfScalar Cscalar, outInt32 int32, right int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +cright := *(*C.int)(unsafe.Pointer(&right)) +C.atg_searchsorted1(ptr, sortedSequence, selfScalar , coutInt32, cright) +} +func AtgSearchsortedOut(ptr *Ctensor, out Ctensor, sortedSequence Ctensor, self Ctensor, outInt32 int32, right int32){ +coutInt32 := *(*C.int)(unsafe.Pointer(&outInt32)) +cright := *(*C.int)(unsafe.Pointer(&right)) +C.atg_searchsorted_out(ptr, out, sortedSequence, self, coutInt32, cright) +} +func AtgSelect(ptr *Ctensor, self Ctensor, dim int64, index int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cindex := *(*C.int64_t)(unsafe.Pointer(&index)) +C.atg_select(ptr, self, cdim, cindex) +} +func AtgSelectBackward(ptr *Ctensor, grad Ctensor, inputSizesData []int64, inputSizesLen int, dim int64, index int64){ +cinputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizesData[0])) +cinputSizesLen := *(*C.int)(unsafe.Pointer(&inputSizesLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cindex := *(*C.int64_t)(unsafe.Pointer(&index)) +C.atg_select_backward(ptr, grad, cinputSizesDataPtr, cinputSizesLen, cdim, cindex) +} +func AtgSelu(ptr *Ctensor, self 
Ctensor){ +C.atg_selu(ptr, self) +} +func AtgSelu_(ptr *Ctensor, self Ctensor){ +C.atg_selu_(ptr, self) +} +func AtgSet_(ptr *Ctensor, self Ctensor){ +C.atg_set_(ptr, self) +} +func AtgSet1_(ptr *Ctensor, self Ctensor, source Ctensor){ +C.atg_set_1(ptr, self, source) +} +func AtgSetRequiresGrad(ptr *Ctensor, self Ctensor, r int32){ +cr := *(*C.int)(unsafe.Pointer(&r)) +C.atg_set_requires_grad(ptr, self, cr) +} +func AtgSgn(ptr *Ctensor, self Ctensor){ +C.atg_sgn(ptr, self) +} +func AtgSgn_(ptr *Ctensor, self Ctensor){ +C.atg_sgn_(ptr, self) +} +func AtgSgnOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sgn_out(ptr, out, self) +} +func AtgSigmoid(ptr *Ctensor, self Ctensor){ +C.atg_sigmoid(ptr, self) +} +func AtgSigmoid_(ptr *Ctensor, self Ctensor){ +C.atg_sigmoid_(ptr, self) +} +func AtgSigmoidBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){ +C.atg_sigmoid_backward(ptr, gradOutput, output) +} +func AtgSigmoidBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){ +C.atg_sigmoid_backward_out(ptr, gradInput, gradOutput, output) +} +func AtgSigmoidOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sigmoid_out(ptr, out, self) +} +func AtgSign(ptr *Ctensor, self Ctensor){ +C.atg_sign(ptr, self) +} +func AtgSign_(ptr *Ctensor, self Ctensor){ +C.atg_sign_(ptr, self) +} +func AtgSignOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sign_out(ptr, out, self) +} +func AtgSignbit(ptr *Ctensor, self Ctensor){ +C.atg_signbit(ptr, self) +} +func AtgSignbitOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_signbit_out(ptr, out, self) +} +func AtgSilu(ptr *Ctensor, self Ctensor){ +C.atg_silu(ptr, self) +} +func AtgSilu_(ptr *Ctensor, self Ctensor){ +C.atg_silu_(ptr, self) +} +func AtgSiluBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor){ +C.atg_silu_backward(ptr, gradOutput, self) +} +func AtgSiluOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_silu_out(ptr, out, self) +} +func AtgSin(ptr *Ctensor, self Ctensor){ +C.atg_sin(ptr, self) +} +func AtgSin_(ptr *Ctensor, self Ctensor){ +C.atg_sin_(ptr, self) +} +func AtgSinOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sin_out(ptr, out, self) +} +func AtgSinh(ptr *Ctensor, self Ctensor){ +C.atg_sinh(ptr, self) +} +func AtgSinh_(ptr *Ctensor, self Ctensor){ +C.atg_sinh_(ptr, self) +} +func AtgSinhOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sinh_out(ptr, out, self) +} +func AtgSlice(ptr *Ctensor, self Ctensor, dim int64, start int64, end int64, step int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstart := *(*C.int64_t)(unsafe.Pointer(&start)) +cend := *(*C.int64_t)(unsafe.Pointer(&end)) +cstep := *(*C.int64_t)(unsafe.Pointer(&step)) +C.atg_slice(ptr, self, cdim, cstart, cend, cstep) +} +func AtgSliceBackward(ptr *Ctensor, grad Ctensor, inputSizesData []int64, inputSizesLen int, dim int64, start int64, end int64, step int64){ +cinputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizesData[0])) +cinputSizesLen := *(*C.int)(unsafe.Pointer(&inputSizesLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cstart := *(*C.int64_t)(unsafe.Pointer(&start)) +cend := *(*C.int64_t)(unsafe.Pointer(&end)) +cstep := *(*C.int64_t)(unsafe.Pointer(&step)) +C.atg_slice_backward(ptr, grad, cinputSizesDataPtr, cinputSizesLen, cdim, cstart, cend, cstep) +} +func AtgSlogdet(ptr *Ctensor, self Ctensor){ +C.atg_slogdet(ptr, self) +} +func AtgSlowConv3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, 
paddingLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_slow_conv3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgSlowConv3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +C.atg_slow_conv3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen) +} +func AtgSlowConvDilated2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_dilated2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvDilated3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_dilated3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose2d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose2d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose2dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose2d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose3d(ptr *Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := (*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose3d(ptr, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSlowConvTranspose3dOut(ptr *Ctensor, out Ctensor, self Ctensor, weight Ctensor, kernelSizeData []int64, kernelSizeLen int, bias Ctensor, strideData []int64, strideLen int, paddingData []int64, paddingLen int, outputPaddingData []int64, outputPaddingLen int, dilationData []int64, dilationLen int){ +ckernelSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&kernelSizeData[0])) +ckernelSizeLen := *(*C.int)(unsafe.Pointer(&kernelSizeLen)) +cstrideDataPtr := 
(*C.int64_t)(unsafe.Pointer(&strideData[0])) +cstrideLen := *(*C.int)(unsafe.Pointer(&strideLen)) +cpaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&paddingData[0])) +cpaddingLen := *(*C.int)(unsafe.Pointer(&paddingLen)) +coutputPaddingDataPtr := (*C.int64_t)(unsafe.Pointer(&outputPaddingData[0])) +coutputPaddingLen := *(*C.int)(unsafe.Pointer(&outputPaddingLen)) +cdilationDataPtr := (*C.int64_t)(unsafe.Pointer(&dilationData[0])) +cdilationLen := *(*C.int)(unsafe.Pointer(&dilationLen)) +C.atg_slow_conv_transpose3d_out(ptr, out, self, weight, ckernelSizeDataPtr, ckernelSizeLen, bias, cstrideDataPtr, cstrideLen, cpaddingDataPtr, cpaddingLen, coutputPaddingDataPtr, coutputPaddingLen, cdilationDataPtr, cdilationLen) +} +func AtgSmm(ptr *Ctensor, self Ctensor, mat2 Ctensor){ +C.atg_smm(ptr, self, mat2) +} +func AtgSmoothL1Loss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64, beta float64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cbeta := *(*C.double)(unsafe.Pointer(&beta)) +C.atg_smooth_l1_loss(ptr, self, target, creduction, cbeta) +} +func AtgSmoothL1LossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, beta float64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cbeta := *(*C.double)(unsafe.Pointer(&beta)) +C.atg_smooth_l1_loss_backward(ptr, gradOutput, self, target, creduction, cbeta) +} +func AtgSmoothL1LossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64, beta float64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cbeta := *(*C.double)(unsafe.Pointer(&beta)) +C.atg_smooth_l1_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction, cbeta) +} +func AtgSmoothL1LossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64, beta float64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +cbeta := *(*C.double)(unsafe.Pointer(&beta)) +C.atg_smooth_l1_loss_out(ptr, out, self, target, creduction, cbeta) +} +func AtgSoftMarginLoss(ptr *Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss(ptr, self, target, creduction) +} +func AtgSoftMarginLossBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss_backward(ptr, gradOutput, self, target, creduction) +} +func AtgSoftMarginLossBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss_backward_out(ptr, gradInput, gradOutput, self, target, creduction) +} +func AtgSoftMarginLossOut(ptr *Ctensor, out Ctensor, self Ctensor, target Ctensor, reduction int64){ +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_soft_margin_loss_out(ptr, out, self, target, creduction) +} +func AtgSoftmax(ptr *Ctensor, self Ctensor, dim int64, dtype int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_softmax(ptr, self, cdim, cdtype) +} +func AtgSoftplus(ptr *Ctensor, self Ctensor){ +C.atg_softplus(ptr, self) +} +func AtgSoftplusBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ +C.atg_softplus_backward(ptr, gradOutput, self, beta , threshold , output) +} +func AtgSoftplusBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, 
self Ctensor, beta Cscalar, threshold Cscalar, output Ctensor){ +C.atg_softplus_backward_out(ptr, gradInput, gradOutput, self, beta , threshold , output) +} +func AtgSoftplusOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_softplus_out(ptr, out, self) +} +func AtgSoftshrink(ptr *Ctensor, self Ctensor){ +C.atg_softshrink(ptr, self) +} +func AtgSoftshrinkBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){ +C.atg_softshrink_backward(ptr, gradOutput, self, lambd ) +} +func AtgSoftshrinkBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, self Ctensor, lambd Cscalar){ +C.atg_softshrink_backward_out(ptr, gradInput, gradOutput, self, lambd ) +} +func AtgSoftshrinkOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_softshrink_out(ptr, out, self) +} +func AtgSolve(ptr *Ctensor, self Ctensor, a Ctensor){ +C.atg_solve(ptr, self, a) +} +func AtgSolveOut(ptr *Ctensor, solution Ctensor, lu Ctensor, self Ctensor, a Ctensor){ +C.atg_solve_out(ptr, solution, lu, self, a) +} +func AtgSort(ptr *Ctensor, self Ctensor, dim int64, descending int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdescending := *(*C.int)(unsafe.Pointer(&descending)) +C.atg_sort(ptr, self, cdim, cdescending) +} +func AtgSortOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, dim int64, descending int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +cdescending := *(*C.int)(unsafe.Pointer(&descending)) +C.atg_sort_out(ptr, values, indices, self, cdim, cdescending) +} +func AtgSparseCooTensor(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_sparse_coo_tensor(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgSparseCooTensor1(ptr *Ctensor, indices Ctensor, values Ctensor, optionsKind int32, optionsDevice int32){ +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_sparse_coo_tensor1(ptr, indices, values, coptionsKind, coptionsDevice) +} +func AtgSparseCooTensor2(ptr *Ctensor, indices Ctensor, values Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_sparse_coo_tensor2(ptr, indices, values, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgSparseMask(ptr *Ctensor, self Ctensor, mask Ctensor){ +C.atg_sparse_mask(ptr, self, mask) +} +func AtgSparseResize_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := *(*C.int64_t)(unsafe.Pointer(&denseDim)) +C.atg_sparse_resize_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) +} +func AtgSparseResizeAndClear_(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int, sparseDim int64, denseDim int64){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim)) +cdenseDim := 
*(*C.int64_t)(unsafe.Pointer(&denseDim)) +C.atg_sparse_resize_and_clear_(ptr, self, csizeDataPtr, csizeLen, csparseDim, cdenseDim) +} + + +func AtgSqrt(ptr *Ctensor, self Ctensor){ +C.atg_sqrt(ptr, self) +} +func AtgSqrt_(ptr *Ctensor, self Ctensor){ +C.atg_sqrt_(ptr, self) +} +func AtgSqrtOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_sqrt_out(ptr, out, self) +} +func AtgSquare(ptr *Ctensor, self Ctensor){ +C.atg_square(ptr, self) +} +func AtgSquare_(ptr *Ctensor, self Ctensor){ +C.atg_square_(ptr, self) +} +func AtgSqueeze(ptr *Ctensor, self Ctensor){ +C.atg_squeeze(ptr, self) +} +func AtgSqueeze1(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_squeeze1(ptr, self, cdim) +} +func AtgSqueeze_(ptr *Ctensor, self Ctensor){ +C.atg_squeeze_(ptr, self) +} +func AtgSqueeze1_(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_squeeze_1(ptr, self, cdim) +} +func AtgSspaddmm(ptr *Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_sspaddmm(ptr, self, mat1, mat2) +} +func AtgSspaddmmOut(ptr *Ctensor, out Ctensor, self Ctensor, mat1 Ctensor, mat2 Ctensor){ +C.atg_sspaddmm_out(ptr, out, self, mat1, mat2) +} +func AtgStack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_stack(ptr, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgStackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int, dim int64){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_stack_out(ptr, out, ctensorsDataPtr, ctensorsLen, cdim) +} +func AtgStd(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg_std(ptr, self, cunbiased) +} +func AtgStd1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_std1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStdMean(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg_std_mean(ptr, self, cunbiased) +} +func AtgStdMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_std_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStdOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_std_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgStft(ptr *Ctensor, self Ctensor, nFft int64, hopLengthVal int64, hopLengthNull int, winLengthVal int64, winLengthNull int, window Ctensor, normalized int32, onesided int32, returnComplex int32){ +cnFft := *(*C.int64_t)(unsafe.Pointer(&nFft)) 
+chopLengthVal := *(*C.int64_t)(unsafe.Pointer(&hopLengthVal)) +chopLengthNull := *(*C.uint8_t)(unsafe.Pointer(&hopLengthNull)) +cwinLengthVal := *(*C.int64_t)(unsafe.Pointer(&winLengthVal)) +cwinLengthNull := *(*C.uint8_t)(unsafe.Pointer(&winLengthNull)) +cnormalized := *(*C.int)(unsafe.Pointer(&normalized)) +conesided := *(*C.int)(unsafe.Pointer(&onesided)) +creturnComplex := *(*C.int)(unsafe.Pointer(&returnComplex)) +C.atg_stft(ptr, self, cnFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window, cnormalized, conesided, creturnComplex) +} +func AtgSub(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_sub(ptr, self, other) +} +func AtgSub1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_sub1(ptr, self, other ) +} +func AtgSub_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_sub_(ptr, self, other) +} +func AtgSub1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_sub_1(ptr, self, other ) +} +func AtgSubOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_sub_out(ptr, out, self, other) +} +func AtgSubtract(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_subtract(ptr, self, other) +} +func AtgSubtract1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_subtract1(ptr, self, other ) +} +func AtgSubtract_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_subtract_(ptr, self, other) +} +func AtgSubtract1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_subtract_1(ptr, self, other ) +} +func AtgSubtractOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_subtract_out(ptr, out, self, other) +} +func AtgSum(ptr *Ctensor, self Ctensor, dtype int32){ +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_sum(ptr, self, cdtype) +} +func AtgSum1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_sum1(ptr, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgSumOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, keepdim int32, dtype int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +cdtype := *(*C.int)(unsafe.Pointer(&dtype)) +C.atg_sum_out(ptr, out, self, cdimDataPtr, cdimLen, ckeepdim, cdtype) +} +func AtgSumToSize(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_sum_to_size(ptr, self, csizeDataPtr, csizeLen) +} +func AtgSvd(ptr *Ctensor, self Ctensor, some int32, computeUv int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) +C.atg_svd(ptr, self, csome, ccomputeUv) +} +func AtgSvdOut(ptr *Ctensor, u Ctensor, s Ctensor, v Ctensor, self Ctensor, some int32, computeUv int32){ +csome := *(*C.int)(unsafe.Pointer(&some)) +ccomputeUv := *(*C.int)(unsafe.Pointer(&computeUv)) +C.atg_svd_out(ptr, u, s, v, self, csome, ccomputeUv) +} +func AtgSymeig(ptr *Ctensor, self Ctensor, eigenvectors int32, upper int32){ +ceigenvectors := *(*C.int)(unsafe.Pointer(&eigenvectors)) +cupper := *(*C.int)(unsafe.Pointer(&upper)) +C.atg_symeig(ptr, self, ceigenvectors, cupper) +} +func AtgSymeigOut(ptr *Ctensor, e Ctensor, v Ctensor, self Ctensor, eigenvectors int32, upper int32){ +ceigenvectors := 
*(*C.int)(unsafe.Pointer(&eigenvectors))
+cupper := *(*C.int)(unsafe.Pointer(&upper))
+C.atg_symeig_out(ptr, e, v, self, ceigenvectors, cupper)
+}
+func AtgT(ptr *Ctensor, self Ctensor){
+C.atg_t(ptr, self)
+}
+func AtgT_(ptr *Ctensor, self Ctensor){
+C.atg_t_(ptr, self)
+}
+func AtgTake(ptr *Ctensor, self Ctensor, index Ctensor){
+C.atg_take(ptr, self, index)
+}
+func AtgTakeBackward(ptr *Ctensor, grad Ctensor, input Ctensor, index Ctensor){
+C.atg_take_backward(ptr, grad, input, index)
+}
+func AtgTakeOut(ptr *Ctensor, out Ctensor, self Ctensor, index Ctensor){
+C.atg_take_out(ptr, out, self, index)
+}
+func AtgTan(ptr *Ctensor, self Ctensor){
+C.atg_tan(ptr, self)
+}
+func AtgTan_(ptr *Ctensor, self Ctensor){
+C.atg_tan_(ptr, self)
+}
+func AtgTanOut(ptr *Ctensor, out Ctensor, self Ctensor){
+C.atg_tan_out(ptr, out, self)
+}
+func AtgTanh(ptr *Ctensor, self Ctensor){
+C.atg_tanh(ptr, self)
+}
+func AtgTanh_(ptr *Ctensor, self Ctensor){
+C.atg_tanh_(ptr, self)
+}
+func AtgTanhBackward(ptr *Ctensor, gradOutput Ctensor, output Ctensor){
+C.atg_tanh_backward(ptr, gradOutput, output)
+}
+func AtgTanhBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, output Ctensor){
+C.atg_tanh_backward_out(ptr, gradInput, gradOutput, output)
+}
+func AtgTanhOut(ptr *Ctensor, out Ctensor, self Ctensor){
+C.atg_tanh_out(ptr, out, self)
+}
+func AtgTensordot(ptr *Ctensor, self Ctensor, other Ctensor, dimsSelfData []int64, dimsSelfLen int, dimsOtherData []int64, dimsOtherLen int){
+cdimsSelfDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsSelfData[0]))
+cdimsSelfLen := *(*C.int)(unsafe.Pointer(&dimsSelfLen))
+cdimsOtherDataPtr := (*C.int64_t)(unsafe.Pointer(&dimsOtherData[0]))
+cdimsOtherLen := *(*C.int)(unsafe.Pointer(&dimsOtherLen))
+C.atg_tensordot(ptr, self, other, cdimsSelfDataPtr, cdimsSelfLen, cdimsOtherDataPtr, cdimsOtherLen)
+}
+func AtgThreshold(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){
+C.atg_threshold(ptr, self, threshold , value )
+}
+func AtgThreshold_(ptr *Ctensor, self Ctensor, threshold Cscalar, value Cscalar){
+C.atg_threshold_(ptr, self, threshold , value )
+}
+func AtgThresholdBackward(ptr *Ctensor, gradOutput Ctensor, self Ctensor, threshold Cscalar){
+C.atg_threshold_backward(ptr, gradOutput, self, threshold )
+}
+func AtgThresholdOut(ptr *Ctensor, out Ctensor, self Ctensor, threshold Cscalar, value Cscalar){
+C.atg_threshold_out(ptr, out, self, threshold , value )
+}
+func AtgTo(ptr *Ctensor, self Ctensor, device int32){
+cdevice := *(*C.int)(unsafe.Pointer(&device))
+C.atg_to(ptr, self, cdevice)
+}
+func AtgTo1(ptr *Ctensor, self Ctensor, optionsKind int32, optionsDevice int32, nonBlocking int32, copy int32){
+coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind))
+coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice))
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to1(ptr, self, coptionsKind, coptionsDevice, cnonBlocking, ccopy)
+}
+func AtgTo2(ptr *Ctensor, self Ctensor, dtype int32, nonBlocking int32, copy int32){
+cdtype := *(*C.int)(unsafe.Pointer(&dtype))
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to2(ptr, self, cdtype, cnonBlocking, ccopy)
+}
+func AtgTo3(ptr *Ctensor, self Ctensor, other Ctensor, nonBlocking int32, copy int32){
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to3(ptr, self, other, cnonBlocking, ccopy)
+}
+func AtgTo4(ptr *Ctensor, self Ctensor, device int32, dtype int32, nonBlocking int32, copy int32){
+cdevice := *(*C.int)(unsafe.Pointer(&device))
+cdtype := *(*C.int)(unsafe.Pointer(&dtype))
+cnonBlocking := *(*C.int)(unsafe.Pointer(&nonBlocking))
+ccopy := *(*C.int)(unsafe.Pointer(&copy))
+C.atg_to4(ptr, self, cdevice, cdtype, cnonBlocking, ccopy)
+}
+func AtgToDense(ptr *Ctensor, self Ctensor){
+C.atg_to_dense(ptr, self)
+}
+func AtgToDenseBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
+C.atg_to_dense_backward(ptr, grad, input)
+}
+func AtgToMkldnn(ptr *Ctensor, self Ctensor){
+C.atg_to_mkldnn(ptr, self)
+}
+func AtgToMkldnnBackward(ptr *Ctensor, grad Ctensor, input Ctensor){
+C.atg_to_mkldnn_backward(ptr, grad, input)
+}
+func AtgToSparse(ptr *Ctensor, self Ctensor){
+C.atg_to_sparse(ptr, self)
+}
+func AtgToSparse1(ptr *Ctensor, self Ctensor, sparseDim int64){
+csparseDim := *(*C.int64_t)(unsafe.Pointer(&sparseDim))
+C.atg_to_sparse1(ptr, self, csparseDim)
+}
+func AtgTopk(ptr *Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
+ck := *(*C.int64_t)(unsafe.Pointer(&k))
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+clargest := *(*C.int)(unsafe.Pointer(&largest))
+csorted := *(*C.int)(unsafe.Pointer(&sorted))
+C.atg_topk(ptr, self, ck, cdim, clargest, csorted)
+}
+func AtgTopkOut(ptr *Ctensor, values Ctensor, indices Ctensor, self Ctensor, k int64, dim int64, largest int32, sorted int32){
+ck := *(*C.int64_t)(unsafe.Pointer(&k))
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+clargest := *(*C.int)(unsafe.Pointer(&largest))
+csorted := *(*C.int)(unsafe.Pointer(&sorted))
+C.atg_topk_out(ptr, values, indices, self, ck, cdim, clargest, csorted)
+}
+func AtgTotype(ptr *Ctensor, self Ctensor, scalarType int32){
+cscalarType := *(*C.int)(unsafe.Pointer(&scalarType))
+C.atg_totype(ptr, self, cscalarType)
+}
+func AtgTrace(ptr *Ctensor, self Ctensor){
+C.atg_trace(ptr, self)
+}
+func AtgTraceBackward(ptr *Ctensor, grad Ctensor, sizesData []int64, sizesLen int){
+csizesDataPtr := (*C.int64_t)(unsafe.Pointer(&sizesData[0]))
+csizesLen := *(*C.int)(unsafe.Pointer(&sizesLen))
+C.atg_trace_backward(ptr, grad, csizesDataPtr, csizesLen)
+}
+func AtgTranspose(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
+cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
+cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
+C.atg_transpose(ptr, self, cdim0, cdim1)
+}
+func AtgTranspose_(ptr *Ctensor, self Ctensor, dim0 int64, dim1 int64){
+cdim0 := *(*C.int64_t)(unsafe.Pointer(&dim0))
+cdim1 := *(*C.int64_t)(unsafe.Pointer(&dim1))
+C.atg_transpose_(ptr, self, cdim0, cdim1)
+}
+func AtgTrapz(ptr *Ctensor, y Ctensor, x Ctensor, dim int64){
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg_trapz(ptr, y, x, cdim)
+}
+func AtgTrapz1(ptr *Ctensor, y Ctensor, dx float64, dim int64){
+cdx := *(*C.double)(unsafe.Pointer(&dx))
+cdim := *(*C.int64_t)(unsafe.Pointer(&dim))
+C.atg_trapz1(ptr, y, cdx, cdim)
+}
+func AtgTriangularSolve(ptr *Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){
+cupper := *(*C.int)(unsafe.Pointer(&upper))
+ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
+cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular))
+C.atg_triangular_solve(ptr, self, a, cupper, ctranspose, cunitriangular)
+}
+func AtgTriangularSolveOut(ptr *Ctensor, x Ctensor, m Ctensor, self Ctensor, a Ctensor, upper int32, transpose int32, unitriangular int32){
+cupper := *(*C.int)(unsafe.Pointer(&upper))
+ctranspose := *(*C.int)(unsafe.Pointer(&transpose))
+cunitriangular := *(*C.int)(unsafe.Pointer(&unitriangular))
+C.atg_triangular_solve_out(ptr, x, m, self, a, cupper, ctranspose, cunitriangular) +} +func AtgTril(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_tril(ptr, self, cdiagonal) +} +func AtgTril_(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_tril_(ptr, self, cdiagonal) +} +func AtgTrilIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){ +crow := *(*C.int64_t)(unsafe.Pointer(&row)) +ccol := *(*C.int64_t)(unsafe.Pointer(&col)) +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_tril_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) +} +func AtgTrilOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_tril_out(ptr, out, self, cdiagonal) +} +func AtgTripletMarginLoss(ptr *Ctensor, anchor Ctensor, positive Ctensor, negative Ctensor, margin float64, p float64, eps float64, swap int32, reduction int64){ +cmargin := *(*C.double)(unsafe.Pointer(&margin)) +cp := *(*C.double)(unsafe.Pointer(&p)) +ceps := *(*C.double)(unsafe.Pointer(&eps)) +cswap := *(*C.int)(unsafe.Pointer(&swap)) +creduction := *(*C.int64_t)(unsafe.Pointer(&reduction)) +C.atg_triplet_margin_loss(ptr, anchor, positive, negative, cmargin, cp, ceps, cswap, creduction) +} +func AtgTriu(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_triu(ptr, self, cdiagonal) +} +func AtgTriu_(ptr *Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_triu_(ptr, self, cdiagonal) +} +func AtgTriuIndices(ptr *Ctensor, row int64, col int64, offset int64, optionsKind int32, optionsDevice int32){ +crow := *(*C.int64_t)(unsafe.Pointer(&row)) +ccol := *(*C.int64_t)(unsafe.Pointer(&col)) +coffset := *(*C.int64_t)(unsafe.Pointer(&offset)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_triu_indices(ptr, crow, ccol, coffset, coptionsKind, coptionsDevice) +} +func AtgTriuOut(ptr *Ctensor, out Ctensor, self Ctensor, diagonal int64){ +cdiagonal := *(*C.int64_t)(unsafe.Pointer(&diagonal)) +C.atg_triu_out(ptr, out, self, cdiagonal) +} +func AtgTrueDivide(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_true_divide(ptr, self, other) +} +func AtgTrueDivide1(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_true_divide1(ptr, self, other ) +} +func AtgTrueDivide_(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_true_divide_(ptr, self, other) +} +func AtgTrueDivide1_(ptr *Ctensor, self Ctensor, other Cscalar){ +C.atg_true_divide_1(ptr, self, other ) +} +func AtgTrueDivideOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_true_divide_out(ptr, out, self, other) +} +func AtgTrunc(ptr *Ctensor, self Ctensor){ +C.atg_trunc(ptr, self) +} +func AtgTrunc_(ptr *Ctensor, self Ctensor){ +C.atg_trunc_(ptr, self) +} +func AtgTruncOut(ptr *Ctensor, out Ctensor, self Ctensor){ +C.atg_trunc_out(ptr, out, self) +} +func AtgTypeAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_type_as(ptr, self, other) +} + +func AtgUnflatten(ptr *Ctensor, self Ctensor, dim int64, sizesData []int64, sizesLen int){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csizesDataPtr := (*C.int64_t)(unsafe.Pointer(&sizesData[0])) +csizesLen := 
*(*C.int)(unsafe.Pointer(&sizesLen)) +C.atg_unflatten(ptr, self, cdim, csizesDataPtr, csizesLen) +} +func AtgUnfold(ptr *Ctensor, self Ctensor, dimension int64, size int64, step int64){ +cdimension := *(*C.int64_t)(unsafe.Pointer(&dimension)) +csize := *(*C.int64_t)(unsafe.Pointer(&size)) +cstep := *(*C.int64_t)(unsafe.Pointer(&step)) +C.atg_unfold(ptr, self, cdimension, csize, cstep) +} +func AtgUnfoldBackward(ptr *Ctensor, gradIn Ctensor, inputSizesData []int64, inputSizesLen int, dim int64, size int64, step int64){ +cinputSizesDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizesData[0])) +cinputSizesLen := *(*C.int)(unsafe.Pointer(&inputSizesLen)) +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csize := *(*C.int64_t)(unsafe.Pointer(&size)) +cstep := *(*C.int64_t)(unsafe.Pointer(&step)) +C.atg_unfold_backward(ptr, gradIn, cinputSizesDataPtr, cinputSizesLen, cdim, csize, cstep) +} +func AtgUniform_(ptr *Ctensor, self Ctensor, from float64, to float64){ +cfrom := *(*C.double)(unsafe.Pointer(&from)) +cto := *(*C.double)(unsafe.Pointer(&to)) +C.atg_uniform_(ptr, self, cfrom, cto) +} +func AtgUniqueConsecutive(ptr *Ctensor, self Ctensor, returnInverse int32, returnCounts int32, dimVal int64, dimNull int){ +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +cdimVal := *(*C.int64_t)(unsafe.Pointer(&dimVal)) +cdimNull := *(*C.uint8_t)(unsafe.Pointer(&dimNull)) +C.atg_unique_consecutive(ptr, self, creturnInverse, creturnCounts, cdimVal, cdimNull) +} +func AtgUniqueDim(ptr *Ctensor, self Ctensor, dim int64, sorted int32, returnInverse int32, returnCounts int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csorted := *(*C.int)(unsafe.Pointer(&sorted)) +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +C.atg_unique_dim(ptr, self, cdim, csorted, creturnInverse, creturnCounts) +} +func AtgUniqueDimConsecutive(ptr *Ctensor, self Ctensor, dim int64, returnInverse int32, returnCounts int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +creturnInverse := *(*C.int)(unsafe.Pointer(&returnInverse)) +creturnCounts := *(*C.int)(unsafe.Pointer(&returnCounts)) +C.atg_unique_dim_consecutive(ptr, self, cdim, creturnInverse, creturnCounts) +} + + + +func AtgUnsqueeze(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_unsqueeze(ptr, self, cdim) +} +func AtgUnsqueeze_(ptr *Ctensor, self Ctensor, dim int64){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +C.atg_unsqueeze_(ptr, self, cdim) +} +func AtgUpsampleBicubic2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bicubic2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleBicubic2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesHVal float64, 
scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bicubic2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleBicubic2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bicubic2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleBicubic2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bicubic2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleBilinear2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bilinear2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleBilinear2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, 
inputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bilinear2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleBilinear2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bilinear2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleBilinear2dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_bilinear2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleLinear1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_linear1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesVal, cscalesNull) +} +func AtgUpsampleLinear1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_linear1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesVal, cscalesNull) +} +func AtgUpsampleLinear1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_linear1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesVal, cscalesNull) +} +func AtgUpsampleLinear1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_linear1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesVal, cscalesNull) +} +func AtgUpsampleNearest1d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_nearest1d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesVal, cscalesNull) +} +func AtgUpsampleNearest1dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_nearest1d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesVal, cscalesNull) +} +func AtgUpsampleNearest1dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := 
*(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_nearest1d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesVal, cscalesNull) +} +func AtgUpsampleNearest1dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesVal float64, scalesNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesVal := *(*C.double)(unsafe.Pointer(&scalesVal)) +cscalesNull := *(*C.uint8_t)(unsafe.Pointer(&scalesNull)) +C.atg_upsample_nearest1d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesVal, cscalesNull) +} +func AtgUpsampleNearest2d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest2d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleNearest2dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest2d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleNearest2dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest2d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleNearest2dOut(ptr *Ctensor, out Ctensor, self Ctensor, 
outputSizeData []int64, outputSizeLen int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest2d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleNearest3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleNearest3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleNearest3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest3d_backward_out(ptr, gradInput, gradOutput, 
coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleNearest3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_nearest3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleTrilinear3d(ptr *Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_trilinear3d(ptr, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleTrilinear3dBackward(ptr *Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_trilinear3d_backward(ptr, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleTrilinear3dBackwardOut(ptr *Ctensor, gradInput Ctensor, gradOutput Ctensor, outputSizeData []int64, outputSizeLen int, inputSizeData []int64, inputSizeLen int, alignCorners int32, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := 
(*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +cinputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&inputSizeData[0])) +cinputSizeLen := *(*C.int)(unsafe.Pointer(&inputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_trilinear3d_backward_out(ptr, gradInput, gradOutput, coutputSizeDataPtr, coutputSizeLen, cinputSizeDataPtr, cinputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgUpsampleTrilinear3dOut(ptr *Ctensor, out Ctensor, self Ctensor, outputSizeData []int64, outputSizeLen int, alignCorners int32, scalesDVal float64, scalesDNull int, scalesHVal float64, scalesHNull int, scalesWVal float64, scalesWNull int){ +coutputSizeDataPtr := (*C.int64_t)(unsafe.Pointer(&outputSizeData[0])) +coutputSizeLen := *(*C.int)(unsafe.Pointer(&outputSizeLen)) +calignCorners := *(*C.int)(unsafe.Pointer(&alignCorners)) +cscalesDVal := *(*C.double)(unsafe.Pointer(&scalesDVal)) +cscalesDNull := *(*C.uint8_t)(unsafe.Pointer(&scalesDNull)) +cscalesHVal := *(*C.double)(unsafe.Pointer(&scalesHVal)) +cscalesHNull := *(*C.uint8_t)(unsafe.Pointer(&scalesHNull)) +cscalesWVal := *(*C.double)(unsafe.Pointer(&scalesWVal)) +cscalesWNull := *(*C.uint8_t)(unsafe.Pointer(&scalesWNull)) +C.atg_upsample_trilinear3d_out(ptr, out, self, coutputSizeDataPtr, coutputSizeLen, calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) +} +func AtgValueSelectingReductionBackward(ptr *Ctensor, grad Ctensor, dim int64, indices Ctensor, sizesData []int64, sizesLen int, keepdim int32){ +cdim := *(*C.int64_t)(unsafe.Pointer(&dim)) +csizesDataPtr := (*C.int64_t)(unsafe.Pointer(&sizesData[0])) +csizesLen := *(*C.int)(unsafe.Pointer(&sizesLen)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_value_selecting_reduction_backward(ptr, grad, cdim, indices, csizesDataPtr, csizesLen, ckeepdim) +} +func AtgValues(ptr *Ctensor, self Ctensor){ +C.atg_values(ptr, self) +} +func AtgVander(ptr *Ctensor, x Ctensor, nVal int64, nNull int, increasing int32){ +cnVal := *(*C.int64_t)(unsafe.Pointer(&nVal)) +cnNull := *(*C.uint8_t)(unsafe.Pointer(&nNull)) +cincreasing := *(*C.int)(unsafe.Pointer(&increasing)) +C.atg_vander(ptr, x, cnVal, cnNull, cincreasing) +} +func AtgVar(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg_var(ptr, self, cunbiased) +} +func AtgVar1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_var1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgVarMean(ptr *Ctensor, self Ctensor, unbiased int32){ +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +C.atg_var_mean(ptr, self, cunbiased) +} +func AtgVarMean1(ptr *Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := 
*(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_var_mean1(ptr, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgVarOut(ptr *Ctensor, out Ctensor, self Ctensor, dimData []int64, dimLen int, unbiased int32, keepdim int32){ +cdimDataPtr := (*C.int64_t)(unsafe.Pointer(&dimData[0])) +cdimLen := *(*C.int)(unsafe.Pointer(&dimLen)) +cunbiased := *(*C.int)(unsafe.Pointer(&unbiased)) +ckeepdim := *(*C.int)(unsafe.Pointer(&keepdim)) +C.atg_var_out(ptr, out, self, cdimDataPtr, cdimLen, cunbiased, ckeepdim) +} +func AtgVdot(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_vdot(ptr, self, other) +} +func AtgVdotOut(ptr *Ctensor, out Ctensor, self Ctensor, other Ctensor){ +C.atg_vdot_out(ptr, out, self, other) +} +func AtgView(ptr *Ctensor, self Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_view(ptr, self, csizeDataPtr, csizeLen) +} +func AtgViewAs(ptr *Ctensor, self Ctensor, other Ctensor){ +C.atg_view_as(ptr, self, other) +} +func AtgViewAsComplex(ptr *Ctensor, self Ctensor){ +C.atg_view_as_complex(ptr, self) +} +func AtgViewAsReal(ptr *Ctensor, self Ctensor){ +C.atg_view_as_real(ptr, self) +} +func AtgVstack(ptr *Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_vstack(ptr, ctensorsDataPtr, ctensorsLen) +} +func AtgVstackOut(ptr *Ctensor, out Ctensor, tensorsData []Ctensor, tensorsLen int){ +ctensorsDataPtr := (*Ctensor)(unsafe.Pointer(&tensorsData[0])) +ctensorsLen := *(*C.int)(unsafe.Pointer(&tensorsLen)) +C.atg_vstack_out(ptr, out, ctensorsDataPtr, ctensorsLen) +} + +func AtgWhere1(ptr *Ctensor, condition Ctensor, self Ctensor, other Ctensor){ +C.atg_where1(ptr, condition, self, other) +} +func AtgWhere2(ptr *Ctensor, condition Ctensor, selfScalar Cscalar, other Ctensor){ +C.atg_where2(ptr, condition, selfScalar , other) +} +func AtgWhere3(ptr *Ctensor, condition Ctensor, self Ctensor, other Cscalar){ +C.atg_where3(ptr, condition, self, other ) +} +func AtgWhere4(ptr *Ctensor, condition Ctensor, selfScalar Cscalar, other Cscalar){ +C.atg_where4(ptr, condition, selfScalar , other ) +} +func AtgZero_(ptr *Ctensor, self Ctensor){ +C.atg_zero_(ptr, self) +} +func AtgZeros(ptr *Ctensor, sizeData []int64, sizeLen int, optionsKind int32, optionsDevice int32){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +coptionsKind := *(*C.int)(unsafe.Pointer(&optionsKind)) +coptionsDevice := *(*C.int)(unsafe.Pointer(&optionsDevice)) +C.atg_zeros(ptr, csizeDataPtr, csizeLen, coptionsKind, coptionsDevice) +} +func AtgZerosLike(ptr *Ctensor, self Ctensor){ +C.atg_zeros_like(ptr, self) +} +func AtgZerosOut(ptr *Ctensor, out Ctensor, sizeData []int64, sizeLen int){ +csizeDataPtr := (*C.int64_t)(unsafe.Pointer(&sizeData[0])) +csizeLen := *(*C.int)(unsafe.Pointer(&sizeLen)) +C.atg_zeros_out(ptr, out, csizeDataPtr, csizeLen) } diff --git a/libtch/tensor.go b/libtch/tensor.go index c6edb02..4e60c4c 100644 --- a/libtch/tensor.go +++ b/libtch/tensor.go @@ -401,8 +401,9 @@ func AtoSgd(learningRate, momentum, dampening, weightDecay float64, nesterov int return C.ato_sgd(clearningRate, cmomentum, cdampening, cweightDecay, cnesterov) } +// NOTE. 
Kept for backward compatibility: the param group API (#261) has not been updated yet. // void ato_add_parameters(optimizer, tensor *, int ntensors); -func AtoAddParameters(coptimizer Coptimizer, tensors []Ctensor, ntensors int) { +func AtoAddParametersOld(coptimizer Coptimizer, tensors []Ctensor, ntensors int) { var ctensors []C.tensor for i := 0; i < len(tensors); i++ { @@ -412,7 +413,23 @@ func AtoAddParameters(coptimizer Coptimizer, tensors []Ctensor, ntensors int) { cntensors := *(*C.int)(unsafe.Pointer(&ntensors)) // Just give pointer to the first element of ctensors slice - C.ato_add_parameters(coptimizer, &ctensors[0], cntensors) + C.ato_add_parameters_old(coptimizer, &ctensors[0], cntensors) +} + +// NOTE. This function targets the new per-group ato_add_parameters C API but has not +// been implemented correctly yet. Do not use it; call AtoAddParametersOld instead. +// TODO: revisit once param groups (#261) are supported. +func AtoAddParameters(coptimizer Coptimizer, tensors []Ctensor, ntensors int) { + + var ctensors []C.tensor + for i := 0; i < len(tensors); i++ { + ctensors = append(ctensors, (C.tensor)(tensors[i])) + } + + cntensors := *(*C.size_t)(unsafe.Pointer(&ntensors)) + + // Just give pointer to the first element of ctensors slice + C.ato_add_parameters(coptimizer, ctensors[0], cntensors) } // void ato_set_learning_rate(optimizer, double learning_rate); diff --git a/libtch/torch_api.cpp b/libtch/torch_api.cpp index f17a0ea..0462519 100644 --- a/libtch/torch_api.cpp +++ b/libtch/torch_api.cpp @@ -1,5 +1,7 @@ #include +#include #include +#include #include #include #include @@ -43,6 +45,15 @@ tensor at_new_tensor() { return nullptr; } +tensor at_tensor_of_blob(void *data, int64_t *dims, size_t ndims, int64_t *strides, size_t nstrides, int type, int device) { + PROTECT( + at::TensorOptions blobOptions = at::TensorOptions().device(device_of_int(device)).dtype(torch::ScalarType(type)); + return new torch::Tensor(torch::from_blob(data, torch::IntArrayRef(dims, ndims), torch::IntArrayRef(strides, nstrides), blobOptions)); + ) + + return nullptr; +} + tensor at_tensor_of_data(void *vs, int64_t *dims, size_t ndims, size_t element_size_in_bytes, int type) { PROTECT( torch::Tensor tensor = torch::zeros(torch::IntArrayRef(dims, ndims), torch::ScalarType(type)); @@ -90,6 +101,11 @@ int at_defined(tensor t) { return -1; } +int at_is_mkldnn(tensor t) { + PROTECT(return t->is_mkldnn();) + return -1; +} + int at_is_sparse(tensor t) { PROTECT(return t->is_sparse();) return -1; @@ -107,6 +123,13 @@ void at_shape(tensor t, int64_t *dims) { ) } +void at_stride(tensor t, int64_t *dims) { + PROTECT( + int i = 0; + for (int64_t dim: t->strides()) dims[i++] = dim; + ) +} + int at_scalar_type(tensor t) { PROTECT( return static_cast<int>(t->scalar_type()); @@ -114,6 +137,46 @@ int at_scalar_type(tensor t) { return -1; } +void at__amp_non_finite_check_and_unscale(tensor t, tensor found_inf, tensor inf_scale) { + PROTECT( + at::_amp_non_finite_check_and_unscale_(*t, *found_inf, *inf_scale); + ) +} + +void at_autocast_clear_cache() { + at::autocast::clear_cache(); +} + +int at_autocast_decrement_nesting() { + PROTECT( + return at::autocast::decrement_nesting(); + ) + return -1; +} + +int at_autocast_increment_nesting() { + PROTECT( + return at::autocast::increment_nesting(); + ) + return -1; +} + +bool at_autocast_is_enabled() { + PROTECT( + return at::autocast::is_enabled(); + ) + return -1; +} + +bool at_autocast_set_enabled(bool b) { + PROTECT( + bool is_enabled = at::autocast::is_enabled(); + at::autocast::set_enabled(b); + return is_enabled; + ) + return -1; +} + int at_device(tensor t) { PROTECT( auto device = t->device();
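The optimizer hunk that follows splits parameter registration into two C entry points: ato_add_parameters_old keeps the previous behaviour of pushing a whole slice of tensors into param group 0, while the new ato_add_parameters(optimizer, tensor, size_t group) appends a single tensor to a given group, creating groups on demand. Because the Go wrapper for the new call is flagged above as not yet working, a caller would stay on the old path for now. A minimal sketch, assuming it sits in package libtch and that AtoSgd returns a Coptimizer handle (its full signature is outside this hunk):

```go
// Hedged sketch of registering parameters through the backward-compatible binding.
func newSGDWithParams(params []Ctensor) Coptimizer {
	// learning rate, momentum, dampening, weight decay, nesterov = 0 (false)
	opt := AtoSgd(0.01, 0.9, 0.0, 0.0, 0)
	// Pushes the whole slice into param group 0, matching ato_add_parameters_old.
	AtoAddParametersOld(opt, params, len(params))
	return opt
}
```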
@@ -417,6 +480,20 @@ optimizer ato_adam(double learning_rate, return nullptr; } +optimizer ato_adamw(double learning_rate, + double beta1, + double beta2, + double weight_decay) { + PROTECT( + auto options = + torch::optim::AdamWOptions(learning_rate) + .betas(std::tuple<double, double>(beta1, beta2)) + .weight_decay(weight_decay); + return new torch::optim::AdamW(vector<torch::Tensor>(), options); + ) + return nullptr; +} + optimizer ato_rms_prop(double learning_rate, double alpha, double eps, @@ -453,24 +530,63 @@ optimizer ato_sgd(double learning_rate, return nullptr; } -void ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { +// NOTE. Backward-compatible path; the param group API (#261) has not been updated yet. +void ato_add_parameters_old(optimizer t, tensor *tensors, int ntensors) { PROTECT( for (int i = 0; i < ntensors; ++i) t->param_groups()[0].params().push_back(*(tensors[i])); ) } +void ato_add_parameters(optimizer t, tensor tensor, size_t group) { + PROTECT( + auto &groups = t->param_groups(); + while (groups.size() <= group) { + groups.push_back(torch::optim::OptimizerParamGroup({}, t->defaults().clone())); + } + groups[group].params().push_back(*tensor); + ) +} + +template <typename T> +void set_lr(optimizer t, double learning_rate) { + torch::optim::OptimizerOptions* d = &(t->defaults()); + if (auto p = dynamic_cast<T*>(d)) { + p->lr(learning_rate); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto p2 = dynamic_cast<T*>(d)) { + p2->lr(learning_rate); + } + else throw std::invalid_argument("unexpected param group type"); + } + } +} + void ato_set_learning_rate(optimizer t, double learning_rate) { PROTECT( - torch::optim::OptimizerOptions* d = &(t->defaults()); - if (auto adam = dynamic_cast<torch::optim::AdamOptions*>(d)) - adam->lr(learning_rate); - else if (auto rms = dynamic_cast<torch::optim::RMSpropOptions*>(d)) - rms->lr(learning_rate); - else if (auto sgd = dynamic_cast<torch::optim::SGDOptions*>(d)) - sgd->lr(learning_rate); - else - throw std::invalid_argument("unexpected optimizer"); + set_lr<torch::optim::AdamOptions>(t, learning_rate); + set_lr<torch::optim::AdamWOptions>(t, learning_rate); + set_lr<torch::optim::RMSpropOptions>(t, learning_rate); + set_lr<torch::optim::SGDOptions>(t, learning_rate); + ) +} + +template <typename T> +void set_lr_group(optimizer t, size_t group, double learning_rate) { + auto &param_group = t->param_groups().at(group); + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto p = dynamic_cast<T*>(d)) { + p->lr(learning_rate); + } +} + +void ato_set_learning_rate_group(optimizer t, size_t group, double learning_rate) { + PROTECT( + set_lr_group<torch::optim::AdamOptions>(t, group, learning_rate); + set_lr_group<torch::optim::AdamWOptions>(t, group, learning_rate); + set_lr_group<torch::optim::RMSpropOptions>(t, group, learning_rate); + set_lr_group<torch::optim::SGDOptions>(t, group, learning_rate); ) } @@ -480,16 +596,115 @@ void ato_set_momentum(optimizer t, double momentum) { if (auto adam = dynamic_cast<torch::optim::AdamOptions*>(d)) { auto betas = adam->betas(); adam->betas(std::tuple<double, double>(momentum, get<1>(betas))); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto adam2 = dynamic_cast<torch::optim::AdamOptions*>(d)) { + adam2->betas(std::tuple<double, double>(momentum, get<1>(betas))); + } + else throw std::invalid_argument("unexpected param group type"); + } } - else if (auto rms = dynamic_cast<torch::optim::RMSpropOptions*>(d)) - rms->momentum(momentum); - else if (auto sgd = dynamic_cast<torch::optim::SGDOptions*>(d)) + else if (auto adamw = dynamic_cast<torch::optim::AdamWOptions*>(d)) { + auto betas = adamw->betas(); + adamw->betas(std::tuple<double, double>(momentum, get<1>(betas))); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto adamw2 = dynamic_cast<torch::optim::AdamWOptions*>(d)) { + adamw2->betas(std::tuple<double, double>(momentum, get<1>(betas))); + } + else throw std::invalid_argument("unexpected param group type"); + } + } + else if (auto rms = dynamic_cast<torch::optim::RMSpropOptions*>(d)) { + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto rms2 = dynamic_cast<torch::optim::RMSpropOptions*>(d)) { + rms2->momentum(momentum); + } + else throw std::invalid_argument("unexpected param group type"); + } + } + else if (auto sgd = dynamic_cast<torch::optim::SGDOptions*>(d)) { sgd->momentum(momentum); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto sgd2 = dynamic_cast<torch::optim::SGDOptions*>(d)) { + sgd2->momentum(momentum); + } + else throw std::invalid_argument("unexpected param group type"); + } + } else throw std::invalid_argument("unexpected optimizer"); ) } +void ato_set_momentum_group(optimizer t, size_t group, double momentum) { + PROTECT( + auto &param_group = t->param_groups().at(group); + torch::optim::OptimizerOptions* d = &(param_group.options()); + + if (auto adam = dynamic_cast<torch::optim::AdamOptions*>(d)) { + auto betas = adam->betas(); + adam->betas(std::tuple<double, double>(momentum, get<1>(betas))); + } + else if (auto adamw = dynamic_cast<torch::optim::AdamWOptions*>(d)) { + auto betas = adamw->betas(); + adamw->betas(std::tuple<double, double>(momentum, get<1>(betas))); + } + else if (auto rms = dynamic_cast<torch::optim::RMSpropOptions*>(d)) { + rms->momentum(momentum); + } + else if (auto sgd = dynamic_cast<torch::optim::SGDOptions*>(d)) { + sgd->momentum(momentum); + } + else + throw std::invalid_argument("unexpected optimizer"); + ) +} + +template <typename T> +void set_weight_decay(optimizer t, double weight_decay) { + torch::optim::OptimizerOptions* d = &(t->defaults()); + if (auto p = dynamic_cast<T*>(d)) { + p->weight_decay(weight_decay); + for (auto &param_group: t->param_groups()) { + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto p2 = dynamic_cast<T*>(d)) { + p2->weight_decay(weight_decay); + } + else throw std::invalid_argument("unexpected param group type"); + } + } +} + +void ato_set_weight_decay(optimizer t, double weight_decay) { + PROTECT( + set_weight_decay<torch::optim::AdamOptions>(t, weight_decay); + set_weight_decay<torch::optim::AdamWOptions>(t, weight_decay); + set_weight_decay<torch::optim::RMSpropOptions>(t, weight_decay); + set_weight_decay<torch::optim::SGDOptions>(t, weight_decay); + ) +} + +template <typename T> +void set_weight_decay_group(optimizer t, size_t group, double weight_decay) { + auto &param_group = t->param_groups().at(group); + torch::optim::OptimizerOptions* d = &(param_group.options()); + if (auto p = dynamic_cast<T*>(d)) { + p->weight_decay(weight_decay); + } +} + +void ato_set_weight_decay_group(optimizer t, size_t group, double weight_decay) { + PROTECT( + set_weight_decay_group<torch::optim::AdamOptions>(t, group, weight_decay); + set_weight_decay_group<torch::optim::AdamWOptions>(t, group, weight_decay); + set_weight_decay_group<torch::optim::RMSpropOptions>(t, group, weight_decay); + set_weight_decay_group<torch::optim::SGDOptions>(t, group, weight_decay); + ) +} + void ato_zero_grad(optimizer t) { PROTECT(t->zero_grad();) } @@ -590,7 +805,7 @@ tensor atm_forward(module m, tensor *tensors, int ntensors) { std::vector<torch::jit::IValue> inputs; for (int i = 0; i < ntensors; ++i) inputs.push_back(*(tensors[i])); - torch::jit::IValue output = m->forward(inputs); + torch::jit::IValue output = m->forward(std::move(inputs)); if (!output.isTensor()) throw std::invalid_argument("forward did not return a tensor"); return new torch::Tensor(output.toTensor()); @@ -605,7 +820,31 @@ ivalue atm_forward_(module m, std::vector<torch::jit::IValue> inputs; for (int i = 0; i < nivalues; ++i) inputs.push_back(*(ivalues[i])); - torch::jit::IValue output = m->forward(inputs); + torch::jit::IValue output = m->forward(std::move(inputs)); + return new torch::jit::IValue(output); + ) + return nullptr; +} + +tensor atm_method(module m, char *method_name, tensor *tensors, int ntensors) { + PROTECT( + std::vector<torch::jit::IValue> inputs; + for (int i = 0; i < ntensors; ++i) +
inputs.push_back(*(tensors[i])); + torch::jit::IValue output = m->get_method(method_name)(std::move(inputs)); + if (!output.isTensor()) + throw std::invalid_argument("method did not return a tensor"); + return new torch::Tensor(output.toTensor()); + ) + return nullptr; +} + +ivalue atm_method_(module m, char *method_name, ivalue *ivalues, int nivalues) { + PROTECT( + std::vector inputs; + for (int i = 0; i < nivalues; ++i) + inputs.push_back(*(ivalues[i])); + torch::jit::IValue output = m->get_method(method_name)(std::move(inputs)); return new torch::jit::IValue(output); ) return nullptr; @@ -615,12 +854,40 @@ void atm_free(module m) { delete(m); } +void atm_save(module m, char *filename) { + PROTECT( + m->save(filename); + ) +} + void atm_to(module m, int device, int dtype, bool non_blocking) { PROTECT( m->to(device_of_int(device), at::ScalarType(dtype), non_blocking); ) } +int atm_get_profiling_mode() { + PROTECT( + return torch::jit::getProfilingMode(); + ) + return 0; +} + +void atm_set_profiling_mode(int b) { + PROTECT( + torch::jit::getProfilingMode() = (bool)b; + ) +} + +void atm_named_parameters(module m, void *data, void (*f)(void *, char *, tensor)) { + PROTECT( + for (const auto &p : m->named_parameters()) { + auto v = p.value; + f(data, (char*)p.name.c_str(), new torch::Tensor(v)); + } + ) +} + ivalue ati_tensor(tensor t) { PROTECT( return new torch::jit::IValue(*t); @@ -718,6 +985,15 @@ ivalue ati_bool_list(char *is, int nvalues) { return nullptr; } +ivalue ati_string_list(char **is, int nvalues) { + PROTECT( + c10::List vec; + for (int i = 0; i < nvalues; ++i) vec.push_back(string(is[i])); + return new torch::jit::IValue(vec); + ) + return nullptr; +} + ivalue ati_tensor_list(tensor *is, int nvalues) { PROTECT( c10::List vec; @@ -855,7 +1131,7 @@ void ati_to_int_list(ivalue i, PROTECT( auto vec = i->toIntList(); if (vec.size() != noutputs) { - throw std::invalid_argument("unexpected list size"); + throw std::invalid_argument("unexpected list size"); } for (int i = 0; i < noutputs; ++i) outputs[i] = vec[i]; @@ -868,7 +1144,7 @@ void ati_to_double_list(ivalue i, PROTECT( auto vec = i->toDoubleList(); if (vec.size() != noutputs) { - throw std::invalid_argument("unexpected list size"); + throw std::invalid_argument("unexpected list size"); } for (int i = 0; i < noutputs; ++i) outputs[i] = vec[i]; @@ -881,7 +1157,7 @@ void ati_to_bool_list(ivalue i, PROTECT( auto vec = i->toBoolList(); if (vec.size() != noutputs) { - throw std::invalid_argument("unexpected list size"); + throw std::invalid_argument("unexpected list size"); } for (int i = 0; i < noutputs; ++i) outputs[i] = vec[i]; @@ -894,7 +1170,7 @@ void ati_to_tensor_list(ivalue i, PROTECT( auto vec = i->toTensorList(); if (vec.size() != noutputs) { - throw std::invalid_argument("unexpected tuple size"); + throw std::invalid_argument("unexpected list size"); } for (int i = 0; i < noutputs; ++i) outputs[i] = new torch::Tensor(vec[i]); diff --git a/libtch/torch_api.h b/libtch/torch_api.h index 79054bb..8e29e46 100644 --- a/libtch/torch_api.h +++ b/libtch/torch_api.h @@ -1,6 +1,6 @@ #ifndef __TORCH_API_H__ #define __TORCH_API_H__ -#include +#include #ifdef __cplusplus thread_local char *torch_last_err = nullptr; @@ -11,11 +11,11 @@ typedef torch::Scalar *scalar; typedef torch::optim::Optimizer *optimizer; typedef torch::jit::script::Module *module; typedef torch::jit::IValue *ivalue; -#define PROTECT(x) \ - try { \ - x \ - } catch (const exception& e) { \ - torch_last_err = strdup(e.what()); \ +#define PROTECT(x) \ + try { \ + x \ 
+ } catch (const exception &e) { \ + torch_last_err = strdup(e.what()); \ } #else typedef void *tensor; @@ -28,18 +28,33 @@ typedef void *ivalue; char *get_and_reset_last_err(); // thread-local void at_manual_seed(int64_t); tensor at_new_tensor(); -tensor at_tensor_of_data(void *vs, int64_t *dims, size_t ndims, size_t element_size_in_bytes, int type); -void at_copy_data(tensor tensor, void *vs, size_t numel, size_t element_size_in_bytes); +tensor at_tensor_of_blob(void *data, int64_t *dims, size_t ndims, + int64_t *strides, size_t nstrides, int type, + int device); +tensor at_tensor_of_data(void *vs, int64_t *dims, size_t ndims, + size_t element_size_in_bytes, int type); +void at_copy_data(tensor tensor, void *vs, size_t numel, + size_t element_size_in_bytes); tensor at_shallow_clone(tensor); void *at_data_ptr(tensor); int at_defined(tensor); +int at_is_mkldnn(tensor); int at_is_sparse(tensor); int at_device(tensor); size_t at_dim(tensor); void at_shape(tensor, int64_t *); +void at_stride(tensor, int64_t *); int at_scalar_type(tensor); +void at__amp_non_finite_check_and_unscale(tensor, tensor, tensor); + +void at_autocast_clear_cache(); +int at_autocast_decrement_nesting(); +int at_autocast_increment_nesting(); +bool at_autocast_is_enabled(); +bool at_autocast_set_enabled(bool b); + void at_backward(tensor, int, int); int at_requires_grad(tensor); int at_grad_set_enabled(int); @@ -50,8 +65,10 @@ void at_fill_int64(tensor, int64_t); double at_double_value_at_indexes(tensor, int64_t *indexes, int indexes_len); int64_t at_int64_value_at_indexes(tensor, int64_t *indexes, int indexes_len); -void at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v); -void at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v); +void at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, + double v); +void at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, + int64_t v); void at_copy_(tensor dst, tensor src); @@ -63,14 +80,20 @@ tensor at_load_image(char *filename); int at_save_image(tensor, char *filename); tensor at_resize_image(tensor, int w, int h); -void at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); +void at_save_multi(tensor *tensors, char **tensor_names, int ntensors, + char *filename); /* [at_load_multi] takes as input an array of nullptr for [tensors]. */ -void at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); +void at_load_multi(tensor *tensors, char **tensor_names, int ntensors, + char *filename); /* [at_load_multi_] takes as input an array of allocation [tensors]. 
*/ -void at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename); +void at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, + char *filename); -void at_load_callback(char *filename, void *data, void (*f)(void *, char *, tensor)); -void at_load_callback_with_device(char *filename, void *data, void (*f)(void *, char *, tensor), int device_id); +void at_load_callback(char *filename, void *data, + void (*f)(void *, char *, tensor)); +void at_load_callback_with_device(char *filename, void *data, + void (*f)(void *, char *, tensor), + int device_id); int at_get_num_interop_threads(); @@ -82,32 +105,27 @@ void at_set_num_threads(int n_threads); void at_free(tensor); -void at_run_backward(tensor *tensors, - int ntensors, - tensor *inputs, - int ninputs, - tensor *outputs, - int keep_graph, - int create_graph); +void at_run_backward(tensor *tensors, int ntensors, tensor *inputs, int ninputs, + tensor *outputs, int keep_graph, int create_graph); -optimizer ato_adam(double learning_rate, - double beta1, - double beta2, +optimizer ato_adam(double learning_rate, double beta1, double beta2, double weight_decay); -optimizer ato_rms_prop(double learning_rate, - double alpha, - double eps, - double weight_decay, - double momentum, - int centered); -optimizer ato_sgd(double learning_rate, - double momentum, - double dampening, - double weight_decay, - int nesterov); -void ato_add_parameters(optimizer, tensor *, int ntensors); +optimizer ato_adamw(double learning_rate, double beta1, double beta2, + double weight_decay); +optimizer ato_rms_prop(double learning_rate, double alpha, double eps, + double weight_decay, double momentum, int centered); +optimizer ato_sgd(double learning_rate, double momentum, double dampening, + double weight_decay, int nesterov); +// NOTE. switch back as param group #261 not updated yet. 
+// Backward compat +void ato_add_parameters_old(optimizer, tensor *, int ntensors); +void ato_add_parameters(optimizer, tensor, size_t group); void ato_set_learning_rate(optimizer, double learning_rate); void ato_set_momentum(optimizer, double momentum); +void ato_set_learning_rate_group(optimizer, size_t group, double learning_rate); +void ato_set_momentum_group(optimizer, size_t group, double momentum); +void ato_set_weight_decay(optimizer t, double weight_decay); +void ato_set_weight_decay_group(optimizer t, size_t group, double weight_decay); void ato_zero_grad(optimizer); void ato_step(optimizer); void ato_free(optimizer); @@ -129,11 +147,16 @@ module atm_load_on_device(char *, int device); module atm_load_str(char *, size_t sz); module atm_load_str_on_device(char *, size_t sz, int device); tensor atm_forward(module, tensor *tensors, int ntensors); -ivalue atm_forward_(module, - ivalue *ivalues, - int nivalues); +ivalue atm_forward_(module, ivalue *ivalues, int nivalues); +tensor atm_method(module, char *method_name, tensor *tensors, int ntensors); +ivalue atm_method_(module, char *method_name, ivalue *ivalues, int nivalues); void atm_free(module); void atm_to(module m, int device, int dtype, bool non_blocking); +void atm_save(module m, char *); +int atm_get_profiling_mode(); +void atm_set_profiling_mode(int); +void atm_named_parameters(module, void *data, + void (*f)(void *, char *, tensor)); ivalue ati_none(); ivalue ati_tensor(tensor); @@ -147,6 +170,7 @@ ivalue ati_generic_dict(ivalue *, int); ivalue ati_int_list(int64_t *, int); ivalue ati_double_list(double *, int); ivalue ati_bool_list(char *, int); +ivalue ati_string_list(char **, int); ivalue ati_tensor_list(tensor *, int); tensor ati_to_tensor(ivalue); diff --git a/libtch/torch_api_generated.cpp.h b/libtch/torch_api_generated.cpp.h index daccc90..c1dbf5f 100644 --- a/libtch/torch_api_generated.cpp.h +++ b/libtch/torch_api_generated.cpp.h @@ -154,27 +154,57 @@ void atg__adaptive_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor ) } -void atg__addr(tensor *out__, tensor self, tensor vec1, tensor vec2) { +void atg__add_batch_dim(tensor *out__, tensor self, int64_t batch_dim, int64_t level) { PROTECT( - auto outputs__ = torch::_addr(*self, *vec1, *vec2); + auto outputs__ = torch::_add_batch_dim(*self, batch_dim, level); out__[0] = new torch::Tensor(outputs__); ) } -void atg__addr_(tensor *out__, tensor self, tensor vec1, tensor vec2) { +void atg__add_relu(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::_addr_(*self, *vec1, *vec2); + auto outputs__ = torch::_add_relu(*self, *other); out__[0] = new torch::Tensor(outputs__); ) } -void atg__addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor vec2) { +void atg__add_relu_(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::_addr_out(*out, *self, *vec1, *vec2); + auto outputs__ = torch::_add_relu_(*self, *other); out__[0] = new torch::Tensor(outputs__); ) } +void atg__add_relu_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::_add_relu_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__addmv_impl_(tensor *out__, tensor self, tensor self2, tensor mat, tensor vec) { + PROTECT( + auto outputs__ = torch::_addmv_impl_(*self, *self2, *mat, *vec); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__aminmax(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::_aminmax(*self); + out__[0] = new 
torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + ) +} + +void atg__aminmax1(tensor *out__, tensor self, int64_t dim, int keepdim) { + PROTECT( + auto outputs__ = torch::_aminmax(*self, dim, (bool)keepdim); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + ) +} + void atg__amp_update_scale(tensor *out__, tensor growth_tracker, tensor current_scale, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) { PROTECT( auto outputs__ = torch::_amp_update_scale(*growth_tracker, *current_scale, *found_inf, scale_growth_factor, scale_backoff_factor, growth_interval); @@ -189,6 +219,20 @@ void atg__baddbmm_mkl_(tensor *out__, tensor self, tensor batch1, tensor batch2) ) } +void atg__bmm(tensor *out__, tensor self, tensor mat2, int deterministic) { + PROTECT( + auto outputs__ = torch::_bmm(*self, *mat2, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__bmm_out(tensor *out__, tensor out, tensor self, tensor mat2, int deterministic) { + PROTECT( + auto outputs__ = torch::_bmm_out(*out, *self, *mat2, (bool)deterministic); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__cast_byte(tensor *out__, tensor self, int non_blocking) { PROTECT( auto outputs__ = torch::_cast_Byte(*self, (bool)non_blocking); @@ -287,6 +331,27 @@ void atg__coalesced_(tensor *out__, tensor self, int coalesced) { ) } +void atg__compute_linear_combination(tensor *out__, tensor input, tensor coefficients) { + PROTECT( + auto outputs__ = torch::_compute_linear_combination(*input, *coefficients); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__compute_linear_combination_out(tensor *out__, tensor out, tensor input, tensor coefficients) { + PROTECT( + auto outputs__ = torch::_compute_linear_combination_out(*out, *input, *coefficients); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__conj(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::_conj(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled) { PROTECT( auto outputs__ = torch::_convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups, (bool)benchmark, (bool)deterministic, (bool)cudnn_enabled); @@ -294,6 +359,13 @@ void atg__convolution(tensor *out__, tensor input, tensor weight, tensor bias, i ) } +void atg__convolution1(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled, int allow_tf32) { + PROTECT( + auto outputs__ = torch::_convolution(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups, (bool)benchmark, (bool)deterministic, (bool)cudnn_enabled, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__convolution_nogroup(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len) { PROTECT( auto outputs__ = torch::_convolution_nogroup(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len)); @@ -422,6 +494,16 @@ void atg__embedding_bag_dense_backward(tensor *out__, tensor grad, tensor indice ) } +void atg__embedding_bag_forward_only(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset) { + PROTECT( + auto outputs__ = torch::_embedding_bag_forward_only(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? *per_sample_weights : torch::Tensor()), (bool)include_last_offset); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + out__[3] = new torch::Tensor(std::get<3>(outputs__)); + ) +} + void atg__embedding_bag_per_sample_weights_backward(tensor *out__, tensor grad, tensor weight, tensor indices, tensor offsets, tensor offset2bag, int64_t mode) { PROTECT( auto outputs__ = torch::_embedding_bag_per_sample_weights_backward(*grad, *weight, *indices, *offsets, *offset2bag, mode); @@ -450,6 +532,45 @@ void atg__empty_per_channel_affine_quantized(tensor *out__, int64_t *size_data, ) } +void atg__euclidean_dist(tensor *out__, tensor x1, tensor x2) { + PROTECT( + auto outputs__ = torch::_euclidean_dist(*x1, *x2); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__fake_quantize_learnable_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + PROTECT( + auto outputs__ = torch::_fake_quantize_learnable_per_channel_affine(*self, *scale, *zero_point, axis, quant_min, quant_max); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__fake_quantize_learnable_per_channel_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { + PROTECT( + auto outputs__ = torch::_fake_quantize_learnable_per_channel_affine_backward(*grad, *self, *scale, *zero_point, axis, quant_min, quant_max); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + ) +} + +void atg__fake_quantize_learnable_per_tensor_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max) { + PROTECT( + auto outputs__ = torch::_fake_quantize_learnable_per_tensor_affine(*self, *scale, *zero_point, quant_min, quant_max); + out__[0] = new torch::Tensor(outputs__); + ) 
+} + +void atg__fake_quantize_learnable_per_tensor_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max) { + PROTECT( + auto outputs__ = torch::_fake_quantize_learnable_per_tensor_affine_backward(*grad, *self, *scale, *zero_point, quant_min, quant_max); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + ) +} + void atg__fft_with_size(tensor *out__, tensor self, int64_t signal_ndim, int complex_input, int complex_output, int inverse, int64_t *checked_signal_sizes_data, int checked_signal_sizes_len, int normalized, int onesided, int64_t *output_sizes_data, int output_sizes_len) { PROTECT( auto outputs__ = torch::_fft_with_size(*self, signal_ndim, (bool)complex_input, (bool)complex_output, (bool)inverse, torch::IntArrayRef(checked_signal_sizes_data, checked_signal_sizes_len), (bool)normalized, (bool)onesided, torch::IntArrayRef(output_sizes_data, output_sizes_len)); @@ -457,6 +578,13 @@ void atg__fft_with_size(tensor *out__, tensor self, int64_t signal_ndim, int com ) } +void atg__fft_with_size1(tensor *out__, tensor self, int64_t signal_ndim, int complex_input, int complex_output, int inverse, int64_t *checked_signal_sizes_data, int checked_signal_sizes_len, int64_t normalization, int onesided, int64_t *output_sizes_data, int output_sizes_len) { + PROTECT( + auto outputs__ = torch::_fft_with_size(*self, signal_ndim, (bool)complex_input, (bool)complex_output, (bool)inverse, torch::IntArrayRef(checked_signal_sizes_data, checked_signal_sizes_len), normalization, (bool)onesided, torch::IntArrayRef(output_sizes_data, output_sizes_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__fused_dropout(tensor *out__, tensor self, double p) { PROTECT( auto outputs__ = torch::_fused_dropout(*self, p); @@ -472,6 +600,21 @@ void atg__gather_sparse_backward(tensor *out__, tensor self, int64_t dim, tensor ) } +void atg__grid_sampler_2d_cpu_fallback(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { + PROTECT( + auto outputs__ = torch::_grid_sampler_2d_cpu_fallback(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__grid_sampler_2d_cpu_fallback_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { + PROTECT( + auto outputs__ = torch::_grid_sampler_2d_cpu_fallback_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + ) +} + void atg__index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) { PROTECT( auto outputs__ = torch::_index_copy_(*self, dim, *index, *source); @@ -514,6 +657,20 @@ void atg__log_softmax_backward_data(tensor *out__, tensor grad_output, tensor ou ) } +void atg__logcumsumexp(tensor *out__, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::_logcumsumexp(*self, dim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__logcumsumexp_out(tensor *out__, tensor out, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::_logcumsumexp_out(*out, *self, dim); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__lu_solve_helper(tensor *out__, tensor 
self, tensor LU_data, tensor LU_pivots) { PROTECT( auto outputs__ = torch::_lu_solve_helper(*self, *LU_data, *LU_pivots); @@ -551,38 +708,6 @@ void atg__masked_scale(tensor *out__, tensor self, tensor mask, double scale) { ) } -void atg__max(tensor *out__, tensor self, int64_t dim, int keepdim) { - PROTECT( - auto outputs__ = torch::_max(*self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - -void atg__max_out(tensor *out__, tensor max, tensor max_indices, tensor self, int64_t dim, int keepdim) { - PROTECT( - auto outputs__ = torch::_max_out(*max, *max_indices, *self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - -void atg__min(tensor *out__, tensor self, int64_t dim, int keepdim) { - PROTECT( - auto outputs__ = torch::_min(*self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - -void atg__min_out(tensor *out__, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim) { - PROTECT( - auto outputs__ = torch::_min_out(*min, *min_indices, *self, dim, (bool)keepdim); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - void atg__mkldnn_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) { PROTECT( auto outputs__ = torch::_mkldnn_reshape(*self, torch::IntArrayRef(shape_data, shape_len)); @@ -694,6 +819,13 @@ void atg__qr_helper(tensor *out__, tensor self, int some) { ) } +void atg__remove_batch_dim(tensor *out__, tensor self, int64_t level, int64_t batch_size, int64_t out_dim) { + PROTECT( + auto outputs__ = torch::_remove_batch_dim(*self, level, batch_size, out_dim); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__reshape_from_tensor(tensor *out__, tensor self, tensor shape) { PROTECT( auto outputs__ = torch::_reshape_from_tensor(*self, *shape); @@ -715,6 +847,13 @@ void atg__sample_dirichlet(tensor *out__, tensor self) { ) } +void atg__saturate_weight_to_fp16(tensor *out__, tensor weight) { + PROTECT( + auto outputs__ = torch::_saturate_weight_to_fp16(*weight); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__shape_as_tensor(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::_shape_as_tensor(*self); @@ -801,6 +940,27 @@ void atg__sparse_coo_tensor_with_dims_and_tensors(tensor *out__, int64_t sparse_ ) } +void atg__sparse_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = torch::_sparse_log_softmax(*self, dim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__sparse_log_softmax1(tensor *out__, tensor self, int64_t dim, int half_to_float) { + PROTECT( + auto outputs__ = torch::_sparse_log_softmax(*self, dim, (bool)half_to_float); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__sparse_log_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { + PROTECT( + auto outputs__ = torch::_sparse_log_softmax_backward_data(*grad_output, *output, dim, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__sparse_mm(tensor *out__, tensor sparse, tensor dense) { PROTECT( auto outputs__ = torch::_sparse_mm(*sparse, *dense); @@ -808,6 +968,27 @@ void atg__sparse_mm(tensor *out__, tensor sparse, tensor dense) { ) } +void 
atg__sparse_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { + PROTECT( + auto outputs__ = torch::_sparse_softmax(*self, dim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__sparse_softmax1(tensor *out__, tensor self, int64_t dim, int half_to_float) { + PROTECT( + auto outputs__ = torch::_sparse_softmax(*self, dim, (bool)half_to_float); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__sparse_softmax_backward_data(tensor *out__, tensor grad_output, tensor output, int64_t dim, tensor self) { + PROTECT( + auto outputs__ = torch::_sparse_softmax_backward_data(*grad_output, *output, dim, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__sparse_sum(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::_sparse_sum(*self); @@ -881,6 +1062,27 @@ void atg__symeig_helper(tensor *out__, tensor self, int eigenvectors, int upper) ) } +void atg__test_optional_filled_intlist(tensor *out__, tensor values, int64_t *addends_data, int addends_len) { + PROTECT( + auto outputs__ = torch::_test_optional_filled_intlist(*values, torch::IntArrayRef(addends_data, addends_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__test_optional_intlist(tensor *out__, tensor values, int64_t *addends_data, int addends_len) { + PROTECT( + auto outputs__ = torch::_test_optional_intlist(*values, torch::IntArrayRef(addends_data, addends_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg__test_serialization_subcmul(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::_test_serialization_subcmul(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg__triangular_solve_helper(tensor *out__, tensor self, tensor A, int upper, int transpose, int unitriangular) { PROTECT( auto outputs__ = torch::_triangular_solve_helper(*self, *A, (bool)upper, (bool)transpose, (bool)unitriangular); @@ -986,6 +1188,27 @@ void atg_abs_out(tensor *out__, tensor out, tensor self) { ) } +void atg_absolute(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::absolute(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_absolute_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = self->absolute_(); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_absolute_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::absolute_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_acos(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::acos(*self); @@ -1007,6 +1230,27 @@ void atg_acos_out(tensor *out__, tensor out, tensor self) { ) } +void atg_acosh(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::acosh(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_acosh_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::acosh_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_acosh_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::acosh_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_adaptive_avg_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_avg_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); @@ -1361,6 +1605,34 @@ void atg_alpha_dropout_(tensor *out__, tensor self, double p, int train) { ) } +void atg_amax(tensor *out__, tensor self, int64_t *dim_data, 
int dim_len, int keepdim) { + PROTECT( + auto outputs__ = torch::amax(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_amax_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { + PROTECT( + auto outputs__ = torch::amax_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_amin(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { + PROTECT( + auto outputs__ = torch::amin(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_amin_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { + PROTECT( + auto outputs__ = torch::amin_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_angle(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::angle(*self); @@ -1431,16 +1703,142 @@ void atg_arange_out1(tensor *out__, tensor out, scalar start, scalar end) { ) } -void atg_argmax(tensor *out__, tensor self, int64_t dim, int keepdim) { +void atg_arccos(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::argmax(*self, dim, (bool)keepdim); + auto outputs__ = torch::arccos(*self); out__[0] = new torch::Tensor(outputs__); ) } -void atg_argmin(tensor *out__, tensor self, int64_t dim, int keepdim) { +void atg_arccos_(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::argmin(*self, dim, (bool)keepdim); + auto outputs__ = torch::arccos_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arccos_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::arccos_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arccosh(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arccosh(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arccosh_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arccosh_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arccosh_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::arccosh_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arcsin(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arcsin(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arcsin_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arcsin_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arcsin_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::arcsin_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arcsinh(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arcsinh(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arcsinh_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arcsinh_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arcsinh_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::arcsinh_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arctan(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arctan(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arctan_(tensor *out__, tensor 
self) { + PROTECT( + auto outputs__ = torch::arctan_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arctan_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::arctan_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arctanh(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arctanh(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arctanh_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::arctanh_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_arctanh_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::arctanh_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_argmax(tensor *out__, tensor self, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::argmax(*self, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_argmin(tensor *out__, tensor self, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::argmin(*self, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); ) } @@ -1452,16 +1850,16 @@ void atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) { ) } -void atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) { +void atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset_v, uint8_t storage_offset_null) { PROTECT( - auto outputs__ = torch::as_strided(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset); + auto outputs__ = torch::as_strided(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset_null ? c10::nullopt : c10::optional(storage_offset_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) { +void atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset_v, uint8_t storage_offset_null) { PROTECT( - auto outputs__ = torch::as_strided_(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset); + auto outputs__ = torch::as_strided_(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset_null ? 
c10::nullopt : c10::optional(storage_offset_v)); out__[0] = new torch::Tensor(outputs__); ) } @@ -1487,6 +1885,27 @@ void atg_asin_out(tensor *out__, tensor out, tensor self) { ) } +void atg_asinh(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::asinh(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_asinh_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::asinh_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_asinh_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::asinh_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_atan(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atan(*self); @@ -1529,6 +1948,87 @@ void atg_atan_out(tensor *out__, tensor out, tensor self) { ) } +void atg_atanh(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::atanh(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_atanh_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::atanh_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_atanh_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::atanh_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_atleast_1d(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::atleast_1d(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +tensor *atg_atleast_1d1(tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::atleast_1d(of_carray_tensor(tensors_data, tensors_len)); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + +void atg_atleast_2d(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::atleast_2d(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +tensor *atg_atleast_2d1(tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::atleast_2d(of_carray_tensor(tensors_data, tensors_len)); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + +void atg_atleast_3d(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::atleast_3d(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +tensor *atg_atleast_3d1(tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::atleast_3d(of_carray_tensor(tensors_data, tensors_len)); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + void atg_avg_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad) { PROTECT( auto outputs__ = torch::avg_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad); @@ -1536,58 +2036,58 @@ void atg_avg_pool1d(tensor *out__, 
tensor self, int64_t *kernel_size_data, int k ) } -void atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? 
c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_avg_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool2d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool2d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? 
c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? 
c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +void atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null) { PROTECT( - auto outputs__ = torch::avg_pool3d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); + auto outputs__ = torch::avg_pool3d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override_null ? c10::nullopt : c10::optional(divisor_override_v)); out__[0] = new torch::Tensor(outputs__); ) } @@ -1673,9 +2173,9 @@ void atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tenso ) } -void atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len) { +void atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, tensor counts) { PROTECT( - auto outputs__ = torch::batch_norm_gather_stats_with_counts(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum, eps, torch::IntArrayRef(counts_data, counts_len)); + auto outputs__ = torch::batch_norm_gather_stats_with_counts(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? 
*running_var : torch::Tensor()), momentum, eps, *counts); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); ) @@ -1788,6 +2288,13 @@ void atg_bincount(tensor *out__, tensor self, tensor weights, int64_t minlength) ) } +void atg_binomial(tensor *out__, tensor count, tensor prob) { + PROTECT( + auto outputs__ = torch::binomial(*count, *prob); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_bitwise_and(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_and(*self, *other); @@ -1949,6 +2456,13 @@ void atg_blackman_window1(tensor *out__, int64_t window_length, int periodic, in ) } +void atg_block_diag(tensor *out__, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::block_diag(of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_bmm(tensor *out__, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::bmm(*self, *mat2); @@ -1976,6 +2490,27 @@ tensor *atg_broadcast_tensors(tensor *tensors_data, int tensors_len) { return nullptr; } +void atg_bucketize(tensor *out__, tensor self, tensor boundaries, int out_int32, int right) { + PROTECT( + auto outputs__ = torch::bucketize(*self, *boundaries, (bool)out_int32, (bool)right); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_bucketize1(tensor *out__, scalar self_scalar, tensor boundaries, int out_int32, int right) { + PROTECT( + auto outputs__ = torch::bucketize(*self_scalar, *boundaries, (bool)out_int32, (bool)right); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_bucketize_out(tensor *out__, tensor out, tensor self, tensor boundaries, int out_int32, int right) { + PROTECT( + auto outputs__ = torch::bucketize_out(*out, *self, *boundaries, (bool)out_int32, (bool)right); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_cartesian_prod(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::cartesian_prod(of_carray_tensor(tensors_data, tensors_len)); @@ -2004,9 +2539,9 @@ void atg_cauchy_(tensor *out__, tensor self, double median, double sigma) { ) } -void atg_cdist(tensor *out__, tensor x1, tensor x2, double p, int64_t compute_mode) { +void atg_cdist(tensor *out__, tensor x1, tensor x2, double p, int64_t compute_mode_v, uint8_t compute_mode_null) { PROTECT( - auto outputs__ = torch::cdist(*x1, *x2, p, compute_mode); + auto outputs__ = torch::cdist(*x1, *x2, p, compute_mode_null ? 
c10::nullopt : c10::optional(compute_mode_v)); out__[0] = new torch::Tensor(outputs__); ) } @@ -2053,6 +2588,13 @@ void atg_chain_matmul(tensor *out__, tensor *matrices_data, int matrices_len) { ) } +void atg_channel_shuffle(tensor *out__, tensor self, int64_t groups) { + PROTECT( + auto outputs__ = torch::channel_shuffle(*self, groups); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_cholesky(tensor *out__, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky(*self, (bool)upper); @@ -2171,6 +2713,27 @@ void atg_clamp_out(tensor *out__, tensor out, tensor self, scalar min, scalar ma ) } +void atg_clip(tensor *out__, tensor self, scalar min, scalar max) { + PROTECT( + auto outputs__ = torch::clip(*self, *min, *max); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_clip_(tensor *out__, tensor self, scalar min, scalar max) { + PROTECT( + auto outputs__ = torch::clip_(*self, *min, *max); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_clip_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) { + PROTECT( + auto outputs__ = torch::clip_out(*out, *self, *min, *max); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_coalesce(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->coalesce(); @@ -2213,6 +2776,20 @@ void atg_combinations(tensor *out__, tensor self, int64_t r, int with_replacemen ) } +void atg_complex(tensor *out__, tensor real, tensor imag) { + PROTECT( + auto outputs__ = torch::complex(*real, *imag); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_complex_out(tensor *out__, tensor out, tensor real, tensor imag) { + PROTECT( + auto outputs__ = torch::complex_out(*out, *real, *imag); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_conj(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::conj(*self); @@ -2376,16 +2953,30 @@ void atg_cosine_similarity(tensor *out__, tensor x1, tensor x2, int64_t dim, dou ) } -void atg_cross(tensor *out__, tensor self, tensor other, int64_t dim) { +void atg_count_nonzero(tensor *out__, tensor self, int64_t *dim_data, int dim_len) { PROTECT( - auto outputs__ = torch::cross(*self, *other, dim); + auto outputs__ = torch::count_nonzero(*self, torch::IntArrayRef(dim_data, dim_len)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim) { +void atg_count_nonzero1(tensor *out__, tensor self, int64_t dim_v, uint8_t dim_null) { PROTECT( - auto outputs__ = torch::cross_out(*out, *self, *other, dim); + auto outputs__ = torch::count_nonzero(*self, dim_null ? c10::nullopt : c10::optional(dim_v)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_cross(tensor *out__, tensor self, tensor other, int64_t dim_v, uint8_t dim_null) { + PROTECT( + auto outputs__ = torch::cross(*self, *other, dim_null ? c10::nullopt : c10::optional(dim_v)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim_v, uint8_t dim_null) { + PROTECT( + auto outputs__ = torch::cross_out(*out, *self, *other, dim_null ? 
c10::nullopt : c10::optional(dim_v)); out__[0] = new torch::Tensor(outputs__); ) } @@ -2451,16 +3042,23 @@ void atg_cudnn_convolution1(tensor *out__, tensor self, tensor weight, tensor bi ) } -void atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +void atg_cudnn_convolution2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( - auto outputs__ = torch::cudnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::cudnn_convolution(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); ) } -void atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +void atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( - auto outputs__ = torch::cudnn_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::cudnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); ) } @@ -2479,16 +3077,23 @@ void 
atg_cudnn_convolution_transpose1(tensor *out__, tensor self, tensor weight, ) } -void atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +void atg_cudnn_convolution_transpose2(tensor *out__, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); ) } -void atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +void atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { PROTECT( - auto outputs__ = torch::cudnn_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); + auto outputs__ = torch::cudnn_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32) { + PROTECT( + auto outputs__ = torch::cudnn_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic, (bool)allow_tf32); out__[0] = new torch::Tensor(outputs__); ) } @@ -2524,6 +3129,13 @@ void atg_cummax_out(tensor *out__, tensor values, tensor indices, tensor self, i ) } +void atg_cummaxmin_backward(tensor *out__, 
tensor grad, tensor input, tensor indices, int64_t dim) { + PROTECT( + auto outputs__ = torch::cummaxmin_backward(*grad, *input, *indices, dim); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_cummin(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::cummin(*self, dim); @@ -2547,6 +3159,13 @@ void atg_cumprod(tensor *out__, tensor self, int64_t dim, int dtype) { ) } +void atg_cumprod_backward(tensor *out__, tensor grad, tensor input, int64_t dim) { + PROTECT( + auto outputs__ = torch::cumprod_backward(*grad, *input, dim); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_cumprod_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumprod_out(*out, *self, dim, at::ScalarType(dtype)); @@ -2575,6 +3194,27 @@ void atg_data(tensor *out__, tensor self) { ) } +void atg_deg2rad(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::deg2rad(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_deg2rad_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::deg2rad_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_deg2rad_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::deg2rad_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_dequantize(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::dequantize(*self); @@ -2582,6 +3222,19 @@ void atg_dequantize(tensor *out__, tensor self) { ) } +tensor *atg_dequantize1(tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::dequantize(of_carray_tensor(tensors_data, tensors_len)); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + void atg_det(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::det(*self); @@ -2610,6 +3263,13 @@ void atg_diag(tensor *out__, tensor self, int64_t diagonal) { ) } +void atg_diag_backward(tensor *out__, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t diagonal) { + PROTECT( + auto outputs__ = torch::diag_backward(*grad, torch::IntArrayRef(input_sizes_data, input_sizes_len), diagonal); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_diag_embed(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { PROTECT( auto outputs__ = torch::diag_embed(*self, offset, dim1, dim2); @@ -2638,6 +3298,13 @@ void atg_diagonal(tensor *out__, tensor self, int64_t offset, int64_t dim1, int6 ) } +void atg_diagonal_backward(tensor *out__, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t offset, int64_t dim1, int64_t dim2) { + PROTECT( + auto outputs__ = torch::diagonal_backward(*grad, torch::IntArrayRef(input_sizes_data, input_sizes_len), offset, dim1, dim2); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_digamma(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::digamma(*self); @@ -2701,6 +3368,41 @@ void atg_div_out(tensor *out__, tensor out, tensor self, tensor other) { ) } +void atg_divide(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::divide(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_divide1(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::divide(*self, *other); + out__[0] = new 
torch::Tensor(outputs__); + ) +} + +void atg_divide_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->divide_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_divide_1(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->divide_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_divide_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::divide_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_dot(tensor *out__, tensor self, tensor tensor) { PROTECT( auto outputs__ = torch::dot(*self, *tensor); @@ -2729,6 +3431,20 @@ void atg_dropout_(tensor *out__, tensor self, double p, int train) { ) } +void atg_dstack(tensor *out__, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::dstack(of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_dstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::dstack_out(*out, of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_eig(tensor *out__, tensor self, int eigenvectors) { PROTECT( auto outputs__ = torch::eig(*self, (bool)eigenvectors); @@ -2846,6 +3562,13 @@ void atg_empty_like(tensor *out__, tensor self) { ) } +void atg_empty_meta(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { + PROTECT( + auto outputs__ = torch::empty_meta(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = torch::empty_out(*out, torch::IntArrayRef(size_data, size_len)); @@ -2853,6 +3576,13 @@ void atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) ) } +void atg_empty_quantized(tensor *out__, int64_t *size_data, int size_len, tensor qtensor) { + PROTECT( + auto outputs__ = torch::empty_quantized(torch::IntArrayRef(size_data, size_len), *qtensor); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_empty_strided(tensor *out__, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::empty_strided(torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); @@ -2972,6 +3702,27 @@ void atg_exp(tensor *out__, tensor self) { ) } +void atg_exp2(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::exp2(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_exp2_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::exp2_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_exp2_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::exp2_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_exp_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::exp_(*self); @@ -3168,6 +3919,76 @@ void atg_fft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) { ) } +void atg_fft_fft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = 
torch::fft_fft(*self, n_null ? c10::nullopt : c10::optional(n_v), dim, std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_fftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_fftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_hfft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_hfft(*self, n_null ? c10::nullopt : c10::optional(n_v), dim, std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_ifft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_ifft(*self, n_null ? c10::nullopt : c10::optional(n_v), dim, std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_ifftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_ifftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_ihfft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_ihfft(*self, n_null ? c10::nullopt : c10::optional(n_v), dim, std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_irfft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_irfft(*self, n_null ? c10::nullopt : c10::optional(n_v), dim, std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_irfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_irfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_rfft(tensor *out__, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_rfft(*self, n_null ? 
c10::nullopt : c10::optional(n_v), dim, std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fft_rfftn(tensor *out__, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len) { + PROTECT( + auto outputs__ = torch::fft_rfftn(*self, torch::IntArrayRef(s_data, s_len), torch::IntArrayRef(dim_data, dim_len), std::string(norm_ptr, norm_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_fill_(tensor *out__, tensor self, scalar value) { PROTECT( auto outputs__ = torch::fill_(*self, *value); @@ -3189,6 +4010,27 @@ void atg_fill_diagonal_(tensor *out__, tensor self, scalar fill_value, int wrap) ) } +void atg_fix(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::fix(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fix_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::fix_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_fix_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::fix_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_flatten(tensor *out__, tensor self, int64_t start_dim, int64_t end_dim) { PROTECT( auto outputs__ = torch::flatten(*self, start_dim, end_dim); @@ -3203,6 +4045,20 @@ void atg_flip(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { ) } +void atg_fliplr(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::fliplr(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_flipud(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::flipud(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_floor(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::floor(*self); @@ -3403,9 +4259,9 @@ void atg_frobenius_norm_out(tensor *out__, tensor out, tensor self, int64_t *dim ) } -void atg_from_file(tensor *out__, char* filename_ptr, int filename_len, int shared, int64_t size, int options_kind, int options_device) { +void atg_from_file(tensor *out__, char* filename_ptr, int filename_len, int shared, int64_t size_v, uint8_t size_null, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::from_file(std::string(filename_ptr, filename_len), (bool)shared, size, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::from_file(std::string(filename_ptr, filename_len), (bool)shared, size_null ? 
c10::nullopt : c10::optional(size_v), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); ) } @@ -3438,6 +4294,13 @@ void atg_gather(tensor *out__, tensor self, int64_t dim, tensor index, int spars ) } +void atg_gather_backward(tensor *out__, tensor grad, tensor self, int64_t dim, tensor index, int sparse_grad) { + PROTECT( + auto outputs__ = torch::gather_backward(*grad, *self, dim, *index, (bool)sparse_grad); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad) { PROTECT( auto outputs__ = torch::gather_out(*out, *self, dim, *index, (bool)sparse_grad); @@ -3445,6 +4308,27 @@ void atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor ) } +void atg_gcd(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::gcd(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_gcd_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::gcd_(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_gcd_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::gcd_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_ge(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::ge(*self, *other); @@ -3573,6 +4457,90 @@ void atg_grad(tensor *out__, tensor self) { ) } +void atg_greater(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::greater(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::greater(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->greater_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->greater_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_equal(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::greater_equal(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_equal1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::greater_equal(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_equal_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->greater_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_equal_1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->greater_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_equal_out(tensor *out__, tensor out, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::greater_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_equal_out1(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::greater_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_out(tensor *out__, tensor out, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::greater_out(*out, *self, *other); + 
out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_greater_out1(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::greater_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_grid_sampler(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) { PROTECT( auto outputs__ = torch::grid_sampler(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners); @@ -3766,6 +4734,34 @@ void atg_hardsigmoid_out(tensor *out__, tensor out, tensor self) { ) } +void atg_hardswish(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::hardswish(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_hardswish_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::hardswish_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_hardswish_backward(tensor *out__, tensor grad_output, tensor self) { + PROTECT( + auto outputs__ = torch::hardswish_backward(*grad_output, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_hardswish_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::hardswish_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_hardtanh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::hardtanh(*self); @@ -3801,6 +4797,27 @@ void atg_hardtanh_out(tensor *out__, tensor out, tensor self) { ) } +void atg_heaviside(tensor *out__, tensor self, tensor values) { + PROTECT( + auto outputs__ = torch::heaviside(*self, *values); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_heaviside_(tensor *out__, tensor self, tensor values) { + PROTECT( + auto outputs__ = self->heaviside_(*values); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_heaviside_out(tensor *out__, tensor out, tensor self, tensor values) { + PROTECT( + auto outputs__ = torch::heaviside_out(*out, *self, *values); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_hinge_embedding_loss(tensor *out__, tensor self, tensor target, double margin, int64_t reduction) { PROTECT( auto outputs__ = torch::hinge_embedding_loss(*self, *target, margin, reduction); @@ -3836,6 +4853,62 @@ void atg_hspmm_out(tensor *out__, tensor out, tensor mat1, tensor mat2) { ) } +void atg_hstack(tensor *out__, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::hstack(of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_hstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::hstack_out(*out, of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_hypot(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::hypot(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_hypot_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->hypot_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_hypot_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::hypot_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_i0(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::i0(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_i0_(tensor *out__, tensor self) { + PROTECT( + auto 
outputs__ = torch::i0_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_i0_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::i0_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_ifft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) { PROTECT( auto outputs__ = torch::ifft(*self, signal_ndim, (bool)normalized); @@ -3962,6 +5035,13 @@ void atg_index_select(tensor *out__, tensor self, int64_t dim, tensor index) { ) } +void atg_index_select_backward(tensor *out__, tensor grad, int64_t *self_sizes_data, int self_sizes_len, int64_t dim, tensor index) { + PROTECT( + auto outputs__ = torch::index_select_backward(*grad, torch::IntArrayRef(self_sizes_data, self_sizes_len), dim, *index); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_index_select_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index) { PROTECT( auto outputs__ = torch::index_select_out(*out, *self, dim, *index); @@ -3976,6 +5056,13 @@ void atg_indices(tensor *out__, tensor self) { ) } +void atg_infinitely_differentiable_gelu_backward(tensor *out__, tensor grad, tensor self) { + PROTECT( + auto outputs__ = torch::infinitely_differentiable_gelu_backward(*grad, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_instance_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled) { PROTECT( auto outputs__ = torch::instance_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)use_input_stats, momentum, eps, (bool)cudnn_enabled); @@ -4039,16 +5126,79 @@ void atg_isnan(tensor *out__, tensor self) { ) } -void atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction) { +void atg_isneginf(tensor *out__, tensor self) { PROTECT( - auto outputs__ = torch::kl_div(*self, *target, reduction); + auto outputs__ = torch::isneginf(*self); out__[0] = new torch::Tensor(outputs__); ) } -void atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) { +void atg_isneginf_out(tensor *out__, tensor out, tensor self) { PROTECT( - auto outputs__ = torch::kl_div_backward(*grad_output, *self, *target, reduction); + auto outputs__ = torch::isneginf_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_isposinf(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::isposinf(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_isposinf_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::isposinf_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_isreal(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::isreal(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_istft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length_v, uint8_t hop_length_null, int64_t win_length_v, uint8_t win_length_null, tensor window, int center, int normalized, int onesided, int64_t length_v, uint8_t length_null, int return_complex) { + PROTECT( + auto outputs__ = torch::istft(*self, n_fft, hop_length_null ? c10::nullopt : c10::optional(hop_length_v), win_length_null ? c10::nullopt : c10::optional(win_length_v), (window ? 
*window : torch::Tensor()), (bool)center, (bool)normalized, (bool)onesided, length_null ? c10::nullopt : c10::optional(length_v), (bool)return_complex); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_kaiser_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { + PROTECT( + auto outputs__ = torch::kaiser_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_kaiser_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { + PROTECT( + auto outputs__ = torch::kaiser_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_kaiser_window2(tensor *out__, int64_t window_length, int periodic, double beta, int options_kind, int options_device) { + PROTECT( + auto outputs__ = torch::kaiser_window(window_length, (bool)periodic, beta, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction, int log_target) { + PROTECT( + auto outputs__ = torch::kl_div(*self, *target, reduction, (bool)log_target); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, int log_target) { + PROTECT( + auto outputs__ = torch::kl_div_backward(*grad_output, *self, *target, reduction, (bool)log_target); out__[0] = new torch::Tensor(outputs__); ) } @@ -4104,6 +5254,27 @@ void atg_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, ) } +void atg_lcm(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::lcm(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_lcm_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::lcm_(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_lcm_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::lcm_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_le(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::le(*self, *other); @@ -4216,6 +5387,90 @@ void atg_lerp_out1(tensor *out__, tensor out, tensor self, tensor end, tensor we ) } +void atg_less(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::less(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::less(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->less_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->less_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_equal(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::less_equal(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_equal1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::less_equal(*self, *other); + out__[0] = 
new torch::Tensor(outputs__); + ) +} + +void atg_less_equal_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->less_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_equal_1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->less_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_equal_out(tensor *out__, tensor out, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::less_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_equal_out1(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::less_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_out(tensor *out__, tensor out, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::less_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_less_out1(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::less_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_lgamma(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::lgamma(*self); @@ -4237,6 +5492,41 @@ void atg_lgamma_out(tensor *out__, tensor out, tensor self) { ) } +void atg_linalg_det(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::linalg_det(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_linalg_norm(tensor *out__, tensor self, scalar ord, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::linalg_norm(*self, *ord, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_linalg_norm1(tensor *out__, tensor self, char* ord_ptr, int ord_len, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::linalg_norm(*self, std::string(ord_ptr, ord_len), torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_linalg_norm_out(tensor *out__, tensor out, tensor self, scalar ord, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::linalg_norm_out(*out, *self, *ord, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_linalg_norm_out1(tensor *out__, tensor out, tensor self, char* ord_ptr, int ord_len, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::linalg_norm_out(*out, *self, std::string(ord_ptr, ord_len), torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) { PROTECT( auto outputs__ = torch::linear(*input, *weight, (bias ? 
*bias : torch::Tensor())); @@ -4244,16 +5534,16 @@ void atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) { ) } -void atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int options_kind, int options_device) { +void atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, int options_kind, int options_device) { PROTECT( - auto outputs__ = torch::linspace(*start, *end, steps, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::linspace(*start, *end, steps_null ? c10::nullopt : c10::optional(steps_v), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); ) } -void atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps) { +void atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps_v, uint8_t steps_null) { PROTECT( - auto outputs__ = torch::linspace_out(*out, *start, *end, steps); + auto outputs__ = torch::linspace_out(*out, *start, *end, steps_null ? c10::nullopt : c10::optional(steps_v)); out__[0] = new torch::Tensor(outputs__); ) } @@ -4384,6 +5674,48 @@ void atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) { ) } +void atg_logaddexp(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::logaddexp(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logaddexp2(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::logaddexp2(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logaddexp2_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::logaddexp2_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logaddexp_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::logaddexp_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logcumsumexp(tensor *out__, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::logcumsumexp(*self, dim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logcumsumexp_out(tensor *out__, tensor out, tensor self, int64_t dim) { + PROTECT( + auto outputs__ = torch::logcumsumexp_out(*out, *self, dim); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_logdet(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::logdet(*self); @@ -4475,16 +5807,51 @@ void atg_logical_xor_out(tensor *out__, tensor out, tensor self, tensor other) { ) } -void atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device) { +void atg_logit(tensor *out__, tensor self, double eps_v, uint8_t eps_null) { PROTECT( - auto outputs__ = torch::logspace(*start, *end, steps, base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + auto outputs__ = torch::logit(*self, eps_null ? c10::nullopt : c10::optional(eps_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps, double base) { +void atg_logit_(tensor *out__, tensor self, double eps_v, uint8_t eps_null) { PROTECT( - auto outputs__ = torch::logspace_out(*out, *start, *end, steps, base); + auto outputs__ = torch::logit_(*self, eps_null ? 
c10::nullopt : c10::optional(eps_v)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logit_backward(tensor *out__, tensor grad_output, tensor self, double eps_v, uint8_t eps_null) { + PROTECT( + auto outputs__ = torch::logit_backward(*grad_output, *self, eps_null ? c10::nullopt : c10::optional(eps_v)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logit_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, double eps_v, uint8_t eps_null) { + PROTECT( + auto outputs__ = torch::logit_backward_out(*grad_input, *grad_output, *self, eps_null ? c10::nullopt : c10::optional(eps_v)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logit_out(tensor *out__, tensor out, tensor self, double eps_v, uint8_t eps_null) { + PROTECT( + auto outputs__ = torch::logit_out(*out, *self, eps_null ? c10::nullopt : c10::optional(eps_v)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base, int options_kind, int options_device) { + PROTECT( + auto outputs__ = torch::logspace(*start, *end, steps_null ? c10::nullopt : c10::optional(steps_v), base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base) { + PROTECT( + auto outputs__ = torch::logspace_out(*out, *start, *end, steps_null ? c10::nullopt : c10::optional(steps_v), base); out__[0] = new torch::Tensor(outputs__); ) } @@ -4657,6 +6024,13 @@ void atg_masked_select(tensor *out__, tensor self, tensor mask) { ) } +void atg_masked_select_backward(tensor *out__, tensor grad, tensor input, tensor mask) { + PROTECT( + auto outputs__ = torch::masked_select_backward(*grad, *input, *mask); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_masked_select_out(tensor *out__, tensor out, tensor self, tensor mask) { PROTECT( auto outputs__ = torch::masked_select_out(*out, *self, *mask); @@ -4678,6 +6052,20 @@ void atg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) { ) } +void atg_matrix_exp(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::matrix_exp(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_matrix_exp_backward(tensor *out__, tensor self, tensor grad) { + PROTECT( + auto outputs__ = torch::matrix_exp_backward(*self, *grad); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_matrix_power(tensor *out__, tensor self, int64_t n) { PROTECT( auto outputs__ = torch::matrix_power(*self, n); @@ -4881,9 +6269,16 @@ void atg_max_unpool3d_out(tensor *out__, tensor out, tensor self, tensor indices ) } -void atg_max_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +void atg_maximum(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::max_values(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + auto outputs__ = torch::maximum(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_maximum_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::maximum_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); ) } @@ -4982,9 +6377,16 @@ void atg_min_out1(tensor *out__, tensor min, tensor min_indices, tensor self, in ) } -void atg_min_values(tensor *out__, tensor self, int64_t *dim_data, int 
dim_len, int keepdim) { +void atg_minimum(tensor *out__, tensor self, tensor other) { PROTECT( - auto outputs__ = torch::min_values(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); + auto outputs__ = torch::minimum(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_minimum_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::minimum_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); ) } @@ -5131,6 +6533,13 @@ void atg_mkldnn_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data ) } +void atg_mkldnn_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::mkldnn_max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::mkldnn_reorder_conv2d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups); @@ -5138,6 +6547,13 @@ void atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *paddi ) } +void atg_mkldnn_reorder_conv3d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { + PROTECT( + auto outputs__ = torch::mkldnn_reorder_conv3d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_mm(tensor *out__, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::mm(*self, *mat2); @@ -5168,6 +6584,20 @@ void atg_mode_out(tensor *out__, tensor values, tensor indices, tensor self, int ) } +void atg_movedim(tensor *out__, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len) { + PROTECT( + auto outputs__ = torch::movedim(*self, torch::IntArrayRef(source_data, source_len), torch::IntArrayRef(destination_data, destination_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_movedim1(tensor *out__, tensor self, int64_t source, int64_t destination) { + PROTECT( + auto outputs__ = torch::movedim(*self, source, destination); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_mse_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { PROTECT( auto outputs__ = torch::mse_loss(*self, *target, reduction); @@ -5287,6 +6717,41 @@ void atg_multinomial_out(tensor *out__, tensor out, tensor self, int64_t num_sam ) } +void atg_multiply(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::multiply(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_multiply1(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::multiply(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) 
+} + +void atg_multiply_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->multiply_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_multiply_1(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->multiply_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_multiply_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::multiply_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_mv(tensor *out__, tensor self, tensor vec) { PROTECT( auto outputs__ = torch::mv(*self, *vec); @@ -5315,6 +6780,55 @@ void atg_mvlgamma_(tensor *out__, tensor self, int64_t p) { ) } +void atg_nanquantile(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile(*self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nanquantile1(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile(*self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nanquantile_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile_out(*out, *self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nanquantile_out1(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::nanquantile_out(*out, *self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nansum(tensor *out__, tensor self, int dtype) { + PROTECT( + auto outputs__ = torch::nansum(*self, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nansum1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::nansum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nansum_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::nansum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_narrow(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) { PROTECT( auto outputs__ = torch::narrow(*self, dim, start, length); @@ -5354,6 +6868,15 @@ void atg_native_batch_norm_out(tensor *out__, tensor out, tensor save_mean, tens ) } +void atg_native_group_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t n, int64_t C, int64_t HxW, int64_t group, double eps) { + PROTECT( + auto outputs__ = torch::native_group_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? 
*bias : torch::Tensor()), n, C, HxW, group, eps); + out__[0] = new torch::Tensor(std::get<0>(outputs__)); + out__[1] = new torch::Tensor(std::get<1>(outputs__)); + out__[2] = new torch::Tensor(std::get<2>(outputs__)); + ) +} + void atg_native_layer_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps) { PROTECT( auto outputs__ = torch::native_layer_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), M, n, eps); @@ -5370,6 +6893,13 @@ void atg_native_norm(tensor *out__, tensor self) { ) } +void atg_native_norm1(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) { + PROTECT( + auto outputs__ = torch::native_norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, at::ScalarType(dtype)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_ne(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::ne(*self, *other); @@ -5433,6 +6963,27 @@ void atg_neg_out(tensor *out__, tensor out, tensor self) { ) } +void atg_negative(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::negative(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_negative_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::negative_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_negative_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::negative_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_new_empty(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = self->new_empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); @@ -5454,6 +7005,27 @@ void atg_new_zeros(tensor *out__, tensor self, int64_t *size_data, int size_len, ) } +void atg_nextafter(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::nextafter(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nextafter_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->nextafter_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_nextafter_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::nextafter_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_nll_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) { PROTECT( auto outputs__ = torch::nll_loss(*self, *target, (weight ? 
*weight : torch::Tensor()), reduction, ignore_index); @@ -5621,6 +7193,48 @@ void atg_normal_out3(tensor *out__, tensor out, double mean, double std, int64_t ) } +void atg_not_equal(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::not_equal(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_not_equal1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::not_equal(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_not_equal_(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->not_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_not_equal_1(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->not_equal_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_not_equal_out(tensor *out__, tensor out, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::not_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_not_equal_out1(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::not_equal_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_nuclear_norm(tensor *out__, tensor self, int keepdim) { PROTECT( auto outputs__ = torch::nuclear_norm(*self, (bool)keepdim); @@ -5712,6 +7326,20 @@ void atg_ormqr_out(tensor *out__, tensor out, tensor self, tensor input2, tensor ) } +void atg_outer(tensor *out__, tensor self, tensor vec2) { + PROTECT( + auto outputs__ = torch::outer(*self, *vec2); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_outer_out(tensor *out__, tensor out, tensor self, tensor vec2) { + PROTECT( + auto outputs__ = torch::outer_out(*out, *self, *vec2); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_pairwise_distance(tensor *out__, tensor x1, tensor x2, double p, double eps, int keepdim) { PROTECT( auto outputs__ = torch::pairwise_distance(*x1, *x2, p, eps, (bool)keepdim); @@ -5768,6 +7396,20 @@ void atg_poisson_nll_loss(tensor *out__, tensor input, tensor target, int log_in ) } +void atg_polar(tensor *out__, tensor abs, tensor angle) { + PROTECT( + auto outputs__ = torch::polar(*abs, *angle); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_polar_out(tensor *out__, tensor out, tensor abs, tensor angle) { + PROTECT( + auto outputs__ = torch::polar_out(*out, *abs, *angle); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_polygamma(tensor *out__, int64_t n, tensor self) { PROTECT( auto outputs__ = torch::polygamma(n, *self); @@ -5918,6 +7560,34 @@ void atg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, int some) { ) } +void atg_quantile(tensor *out__, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile(*self, q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_quantile1(tensor *out__, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile(*self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_quantile_out(tensor *out__, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile_out(*out, *self, q, dim_null ? 
c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_quantile_out1(tensor *out__, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim) { + PROTECT( + auto outputs__ = torch::quantile_out(*out, *self, *q, dim_null ? c10::nullopt : c10::optional(dim_v), (bool)keepdim); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype) { PROTECT( auto outputs__ = torch::quantize_per_channel(*self, *scales, *zero_points, axis, at::ScalarType(dtype)); @@ -5932,6 +7602,19 @@ void atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t z ) } +tensor *atg_quantize_per_tensor1(tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype) { + PROTECT( + auto outputs__ = torch::quantize_per_tensor(of_carray_tensor(tensors_data, tensors_len), *scales, *zero_points, at::ScalarType(dtype)); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + void atg_quantized_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor var, double eps, double output_scale, int64_t output_zero_point) { PROTECT( auto outputs__ = torch::quantized_batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *var, eps, output_scale, output_zero_point); @@ -5939,22 +7622,6 @@ void atg_quantized_batch_norm(tensor *out__, tensor input, tensor weight, tensor ) } -void atg_quantized_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) { - PROTECT( - auto outputs__ = torch::quantized_gru(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - -void atg_quantized_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) { - PROTECT( - auto outputs__ = torch::quantized_gru(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - ) -} - void atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( auto outputs__ = torch::quantized_gru_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); @@ -5962,24 +7629,6 @@ void atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, ) } -void atg_quantized_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double 
dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic) { - PROTECT( - auto outputs__ = torch::quantized_lstm(*input, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first, at::ScalarType(dtype), (bool)use_dynamic); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); - ) -} - -void atg_quantized_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic) { - PROTECT( - auto outputs__ = torch::quantized_lstm(*data, *batch_sizes, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, at::ScalarType(dtype), (bool)use_dynamic); - out__[0] = new torch::Tensor(std::get<0>(outputs__)); - out__[1] = new torch::Tensor(std::get<1>(outputs__)); - out__[2] = new torch::Tensor(std::get<2>(outputs__)); - ) -} - void atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) { PROTECT( auto outputs__ = torch::quantized_lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh); @@ -5988,6 +7637,13 @@ void atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int h ) } +void atg_quantized_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { + PROTECT( + auto outputs__ = torch::quantized_max_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_quantized_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::quantized_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); @@ -6009,6 +7665,27 @@ void atg_quantized_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor ) } +void atg_rad2deg(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::rad2deg(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_rad2deg_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::rad2deg_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_rad2deg_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::rad2deg_out(*out, *self); + out__[0] = new 
torch::Tensor(outputs__); + ) +} + void atg_rand(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::rand(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); @@ -6107,9 +7784,9 @@ void atg_random_1(tensor *out__, tensor self, int64_t to) { ) } -void atg_random_2(tensor *out__, tensor self, int64_t from, int64_t to) { +void atg_random_2(tensor *out__, tensor self, int64_t from, int64_t to_v, uint8_t to_null) { PROTECT( - auto outputs__ = self->random_(from, to); + auto outputs__ = self->random_(from, to_null ? c10::nullopt : c10::optional(to_v)); out__[0] = new torch::Tensor(outputs__); ) } @@ -6324,16 +8001,16 @@ void atg_repeat_interleave(tensor *out__, tensor repeats) { ) } -void atg_repeat_interleave1(tensor *out__, tensor self, tensor repeats, int64_t dim) { +void atg_repeat_interleave1(tensor *out__, tensor self, tensor repeats, int64_t dim_v, uint8_t dim_null) { PROTECT( - auto outputs__ = torch::repeat_interleave(*self, *repeats, dim); + auto outputs__ = torch::repeat_interleave(*self, *repeats, dim_null ? c10::nullopt : c10::optional(dim_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_repeat_interleave2(tensor *out__, tensor self, int64_t repeats, int64_t dim) { +void atg_repeat_interleave2(tensor *out__, tensor self, int64_t repeats, int64_t dim_v, uint8_t dim_null) { PROTECT( - auto outputs__ = torch::repeat_interleave(*self, repeats, dim); + auto outputs__ = torch::repeat_interleave(*self, repeats, dim_null ? c10::nullopt : c10::optional(dim_v)); out__[0] = new torch::Tensor(outputs__); ) } @@ -6422,9 +8099,9 @@ void atg_replication_pad3d_out(tensor *out__, tensor out, tensor self, int64_t * ) } -void atg_requires_grad_(tensor *out__, tensor self, int _requires_grad) { +void atg_requires_grad_(tensor *out__, tensor self, int requires_grad) { PROTECT( - auto outputs__ = self->requires_grad_((bool)_requires_grad); + auto outputs__ = self->requires_grad_((bool)requires_grad); out__[0] = new torch::Tensor(outputs__); ) } @@ -6657,6 +8334,20 @@ void atg_scatter_1(tensor *out__, tensor self, int64_t dim, tensor index, scalar ) } +void atg_scatter_2(tensor *out__, tensor self, int64_t dim, tensor index, tensor src, char* reduce_ptr, int reduce_len) { + PROTECT( + auto outputs__ = self->scatter_(dim, *index, *src, std::string(reduce_ptr, reduce_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_scatter_3(tensor *out__, tensor self, int64_t dim, tensor index, scalar value, char* reduce_ptr, int reduce_len) { + PROTECT( + auto outputs__ = self->scatter_(dim, *index, *value, std::string(reduce_ptr, reduce_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_scatter_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) { PROTECT( auto outputs__ = torch::scatter_add(*self, dim, *index, *src); @@ -6671,6 +8362,27 @@ void atg_scatter_add_(tensor *out__, tensor self, int64_t dim, tensor index, ten ) } +void atg_searchsorted(tensor *out__, tensor sorted_sequence, tensor self, int out_int32, int right) { + PROTECT( + auto outputs__ = torch::searchsorted(*sorted_sequence, *self, (bool)out_int32, (bool)right); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_searchsorted1(tensor *out__, tensor sorted_sequence, scalar self_scalar, int out_int32, int right) { + PROTECT( + auto outputs__ = torch::searchsorted(*sorted_sequence, *self_scalar, (bool)out_int32, (bool)right); + out__[0] = new 
torch::Tensor(outputs__); + ) +} + +void atg_searchsorted_out(tensor *out__, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right) { + PROTECT( + auto outputs__ = torch::searchsorted_out(*out, *sorted_sequence, *self, (bool)out_int32, (bool)right); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) { PROTECT( auto outputs__ = torch::select(*self, dim, index); @@ -6678,6 +8390,13 @@ void atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) { ) } +void atg_select_backward(tensor *out__, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t index) { + PROTECT( + auto outputs__ = torch::select_backward(*grad, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, index); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_selu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::selu(*self); @@ -6713,6 +8432,27 @@ void atg_set_requires_grad(tensor *out__, tensor self, int r) { ) } +void atg_sgn(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::sgn(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_sgn_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = self->sgn_(); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_sgn_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::sgn_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_sigmoid(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sigmoid(*self); @@ -6769,6 +8509,48 @@ void atg_sign_out(tensor *out__, tensor out, tensor self) { ) } +void atg_signbit(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::signbit(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_signbit_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::signbit_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_silu(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::silu(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_silu_(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::silu_(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_silu_backward(tensor *out__, tensor grad_output, tensor self) { + PROTECT( + auto outputs__ = torch::silu_backward(*grad_output, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_silu_out(tensor *out__, tensor out, tensor self) { + PROTECT( + auto outputs__ = torch::silu_out(*out, *self); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_sin(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::sin(*self); @@ -6818,6 +8600,13 @@ void atg_slice(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t e ) } +void atg_slice_backward(tensor *out__, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t start, int64_t end, int64_t step) { + PROTECT( + auto outputs__ = torch::slice_backward(*grad, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, start, end, step); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_slogdet(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::slogdet(*self); @@ -6889,30 +8678,30 @@ void atg_smm(tensor *out__, tensor self, tensor mat2) { ) } -void atg_smooth_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { +void atg_smooth_l1_loss(tensor *out__, tensor 
self, tensor target, int64_t reduction, double beta) { PROTECT( - auto outputs__ = torch::smooth_l1_loss(*self, *target, reduction); + auto outputs__ = torch::smooth_l1_loss(*self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); ) } -void atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) { +void atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta) { PROTECT( - auto outputs__ = torch::smooth_l1_loss_backward(*grad_output, *self, *target, reduction); + auto outputs__ = torch::smooth_l1_loss_backward(*grad_output, *self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); ) } -void atg_smooth_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) { +void atg_smooth_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta) { PROTECT( - auto outputs__ = torch::smooth_l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction); + auto outputs__ = torch::smooth_l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); ) } -void atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) { +void atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction, double beta) { PROTECT( - auto outputs__ = torch::smooth_l1_loss_out(*out, *self, *target, reduction); + auto outputs__ = torch::smooth_l1_loss_out(*out, *self, *target, reduction, beta); out__[0] = new torch::Tensor(outputs__); ) } @@ -7236,9 +9025,9 @@ void atg_std_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int ) } -void atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided) { +void atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length_v, uint8_t hop_length_null, int64_t win_length_v, uint8_t win_length_null, tensor window, int normalized, int onesided, int return_complex) { PROTECT( - auto outputs__ = torch::stft(*self, n_fft, hop_length, win_length, (window ? *window : torch::Tensor()), (bool)normalized, (bool)onesided); + auto outputs__ = torch::stft(*self, n_fft, hop_length_null ? c10::nullopt : c10::optional(hop_length_v), win_length_null ? c10::nullopt : c10::optional(win_length_v), (window ? 
*window : torch::Tensor()), (bool)normalized, (bool)onesided, (bool)return_complex); out__[0] = new torch::Tensor(outputs__); ) } @@ -7278,6 +9067,41 @@ void atg_sub_out(tensor *out__, tensor out, tensor self, tensor other) { ) } +void atg_subtract(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::subtract(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_subtract1(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::subtract(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_subtract_(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = self->subtract_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_subtract_1(tensor *out__, tensor self, scalar other) { + PROTECT( + auto outputs__ = self->subtract_(*other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_subtract_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::subtract_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_sum(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = torch::sum(*self, at::ScalarType(dtype)); @@ -7361,6 +9185,13 @@ void atg_take(tensor *out__, tensor self, tensor index) { ) } +void atg_take_backward(tensor *out__, tensor grad, tensor input, tensor index) { + PROTECT( + auto outputs__ = torch::take_backward(*grad, *input, *index); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_take_out(tensor *out__, tensor out, tensor self, tensor index) { PROTECT( auto outputs__ = torch::take_out(*out, *self, *index); @@ -7566,6 +9397,13 @@ void atg_trace(tensor *out__, tensor self) { ) } +void atg_trace_backward(tensor *out__, tensor grad, int64_t *sizes_data, int sizes_len) { + PROTECT( + auto outputs__ = torch::trace_backward(*grad, torch::IntArrayRef(sizes_data, sizes_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( auto outputs__ = torch::transpose(*self, dim0, dim1); @@ -7749,6 +9587,13 @@ tensor *atg_unbind(tensor self, int64_t dim) { return nullptr; } +void atg_unflatten(tensor *out__, tensor self, int64_t dim, int64_t *sizes_data, int sizes_len) { + PROTECT( + auto outputs__ = self->unflatten(dim, torch::IntArrayRef(sizes_data, sizes_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_unfold(tensor *out__, tensor self, int64_t dimension, int64_t size, int64_t step) { PROTECT( auto outputs__ = self->unfold(dimension, size, step); @@ -7756,6 +9601,13 @@ void atg_unfold(tensor *out__, tensor self, int64_t dimension, int64_t size, int ) } +void atg_unfold_backward(tensor *out__, tensor grad_in, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t size, int64_t step) { + PROTECT( + auto outputs__ = torch::unfold_backward(*grad_in, torch::IntArrayRef(input_sizes_data, input_sizes_len), dim, size, step); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_uniform_(tensor *out__, tensor self, double from, double to) { PROTECT( auto outputs__ = self->uniform_(from, to); @@ -7763,9 +9615,9 @@ void atg_uniform_(tensor *out__, tensor self, double from, double to) { ) } -void atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int return_counts, int64_t dim) { +void atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int return_counts, int64_t dim_v, uint8_t dim_null) { PROTECT( - 
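// Editor's note (illustrative sketch): nullable int64_t / double arguments are
// now lowered to a (value, null-flag) pair at the C boundary and rebuilt as a
// c10::optional at the call site, as in the stft, unique_consecutive and
// upsample_* changes in the surrounding hunks. The conversion is always the
// same ternary; the helper below (to_opt_int64, a hypothetical name) just
// spells it out once.
static inline c10::optional<int64_t> to_opt_int64(int64_t v, uint8_t is_null) {
  // flag set -> "None" (c10::nullopt), otherwise wrap the raw value
  return is_null ? c10::nullopt : c10::optional<int64_t>(v);
}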
auto outputs__ = torch::unique_consecutive(*self, (bool)return_inverse, (bool)return_counts, dim); + auto outputs__ = torch::unique_consecutive(*self, (bool)return_inverse, (bool)return_counts, dim_null ? c10::nullopt : c10::optional(dim_v)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); out__[1] = new torch::Tensor(std::get<1>(outputs__)); out__[2] = new torch::Tensor(std::get<2>(outputs__)); @@ -7790,6 +9642,45 @@ void atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int ret ) } +tensor *atg_unsafe_chunk(tensor self, int64_t chunks, int64_t dim) { + PROTECT( + auto outputs__ = torch::unsafe_chunk(*self, chunks, dim); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + +tensor *atg_unsafe_split(tensor self, int64_t split_size, int64_t dim) { + PROTECT( + auto outputs__ = torch::unsafe_split(*self, split_size, dim); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + +tensor *atg_unsafe_split_with_sizes(tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) { + PROTECT( + auto outputs__ = torch::unsafe_split_with_sizes(*self, torch::IntArrayRef(split_sizes_data, split_sizes_len), dim); + int sz = outputs__.size(); + torch::Tensor **out__ = (torch::Tensor**)malloc((sz + 1) * sizeof(torch::Tensor*)); + for (int i = 0; i < sz; ++i) + out__[i] = new torch::Tensor(outputs__[i]); + out__[sz] = nullptr; + return out__; + ) + return nullptr; +} + void atg_unsqueeze(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::unsqueeze(*self, dim); @@ -7804,198 +9695,205 @@ void atg_unsqueeze_(tensor *out__, tensor self, int64_t dim) { ) } -void atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bicubic2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? 
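// Editor's note (illustrative sketch): list-returning ops such as the new
// unsafe_chunk / unsafe_split wrappers above use a different convention from
// the out__-pointer style: they return a malloc'd, nullptr-terminated array of
// torch::Tensor* handles. A caller-side sketch (example_unsafe_chunk is a
// hypothetical function, the chunk count is arbitrary):
static void example_unsafe_chunk(tensor self) {
  tensor *chunks = atg_unsafe_chunk(self, /*chunks=*/4, /*dim=*/0);
  if (chunks == nullptr) return;            // PROTECT returns nullptr on error
  for (int i = 0; chunks[i] != nullptr; ++i) {
    // each chunks[i] is a heap-allocated torch::Tensor* now owned by the caller
  }
  free(chunks);                             // the array itself was malloc'd
}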
c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bicubic2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_bicubic2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bicubic2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bicubic2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bicubic2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bicubic2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? 
c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bilinear2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bilinear2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_bilinear2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bilinear2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bilinear2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? 
c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w) { +void atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_bilinear2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h, scales_w); + auto outputs__ = torch::upsample_bilinear2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales) { +void atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_linear1d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales); + auto outputs__ = torch::upsample_linear1d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_null ? c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales) { +void atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_linear1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales); + auto outputs__ = torch::upsample_linear1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_null ? c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_linear1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales) { +void atg_upsample_linear1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_linear1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales); + auto outputs__ = torch::upsample_linear1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_null ? 
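// Editor's note (illustrative sketch): the upsample_* wrappers now take one
// (value, null-flag) pair per optional scale factor instead of a bare double.
// A caller that only wants to specify the output size sets the null flags and
// leaves the values unused. example_upsample is a hypothetical function, the
// sizes are arbitrary, and `tensor` is assumed to be the torch::Tensor*
// typedef used throughout this C API.
static void example_upsample(tensor input) {
  tensor out[1];
  int64_t output_size[2] = {64, 64};
  atg_upsample_nearest2d(out, input, output_size, 2,
                         /*scales_h_v=*/0.0, /*scales_h_null=*/1,   // scales_h = None
                         /*scales_w_v=*/0.0, /*scales_w_null=*/1);  // scales_w = None
}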
c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales) { +void atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_linear1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales); + auto outputs__ = torch::upsample_linear1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_null ? c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales) { +void atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_nearest1d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales); + auto outputs__ = torch::upsample_nearest1d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_null ? c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales) { +void atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_nearest1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales); + auto outputs__ = torch::upsample_nearest1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_null ? c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales) { +void atg_upsample_nearest1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_nearest1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales); + auto outputs__ = torch::upsample_nearest1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_null ? 
c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales) { +void atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null) { PROTECT( - auto outputs__ = torch::upsample_nearest1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales); + auto outputs__ = torch::upsample_nearest1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_null ? c10::nullopt : c10::optional(scales_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w) { +void atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest2d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_h, scales_w); + auto outputs__ = torch::upsample_nearest2d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w) { +void atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h, scales_w); + auto outputs__ = torch::upsample_nearest2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w) { +void atg_upsample_nearest2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h, scales_w); + auto outputs__ = torch::upsample_nearest2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? 
c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w) { +void atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_h, scales_w); + auto outputs__ = torch::upsample_nearest2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w) { +void atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest3d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_nearest3d(*self, torch::IntArrayRef(output_size_data, output_size_len), scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w) { +void atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_nearest3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? 
c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w) { +void atg_upsample_nearest3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_nearest3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w) { +void atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_nearest3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_nearest3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { +void atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_trilinear3d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? 
c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { +void atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_trilinear3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_trilinear3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { +void atg_upsample_trilinear3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_trilinear3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners, scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? c10::nullopt : c10::optional(scales_w_v)); out__[0] = new torch::Tensor(outputs__); ) } -void atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w) { +void atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null) { PROTECT( - auto outputs__ = torch::upsample_trilinear3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d, scales_h, scales_w); + auto outputs__ = torch::upsample_trilinear3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners, scales_d_null ? c10::nullopt : c10::optional(scales_d_v), scales_h_null ? c10::nullopt : c10::optional(scales_h_v), scales_w_null ? 
c10::nullopt : c10::optional(scales_w_v)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_value_selecting_reduction_backward(tensor *out__, tensor grad, int64_t dim, tensor indices, int64_t *sizes_data, int sizes_len, int keepdim) { + PROTECT( + auto outputs__ = torch::value_selecting_reduction_backward(*grad, dim, *indices, torch::IntArrayRef(sizes_data, sizes_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); ) } @@ -8007,6 +9905,13 @@ void atg_values(tensor *out__, tensor self) { ) } +void atg_vander(tensor *out__, tensor x, int64_t n_v, uint8_t n_null, int increasing) { + PROTECT( + auto outputs__ = torch::vander(*x, n_null ? c10::nullopt : c10::optional(n_v), (bool)increasing); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_var(tensor *out__, tensor self, int unbiased) { PROTECT( auto outputs__ = torch::var(*self, (bool)unbiased); @@ -8044,6 +9949,20 @@ void atg_var_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int ) } +void atg_vdot(tensor *out__, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::vdot(*self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_vdot_out(tensor *out__, tensor out, tensor self, tensor other) { + PROTECT( + auto outputs__ = torch::vdot_out(*out, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_view(tensor *out__, tensor self, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = self->view(torch::IntArrayRef(size_data, size_len)); @@ -8058,6 +9977,34 @@ void atg_view_as(tensor *out__, tensor self, tensor other) { ) } +void atg_view_as_complex(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::view_as_complex(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_view_as_real(tensor *out__, tensor self) { + PROTECT( + auto outputs__ = torch::view_as_real(*self); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_vstack(tensor *out__, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::vstack(of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_vstack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len) { + PROTECT( + auto outputs__ = torch::vstack_out(*out, of_carray_tensor(tensors_data, tensors_len)); + out__[0] = new torch::Tensor(outputs__); + ) +} + tensor *atg_where(tensor condition) { PROTECT( auto outputs__ = torch::where(*condition); @@ -8078,6 +10025,27 @@ void atg_where1(tensor *out__, tensor condition, tensor self, tensor other) { ) } +void atg_where2(tensor *out__, tensor condition, scalar self_scalar, tensor other) { + PROTECT( + auto outputs__ = torch::where(*condition, *self_scalar, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_where3(tensor *out__, tensor condition, tensor self, scalar other) { + PROTECT( + auto outputs__ = torch::where(*condition, *self, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + +void atg_where4(tensor *out__, tensor condition, scalar self_scalar, scalar other) { + PROTECT( + auto outputs__ = torch::where(*condition, *self_scalar, *other); + out__[0] = new torch::Tensor(outputs__); + ) +} + void atg_zero_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::zero_(*self); diff --git a/libtch/torch_api_generated.h b/libtch/torch_api_generated.h index e5d0b26..7afce36 100644 --- a/libtch/torch_api_generated.h +++ b/libtch/torch_api_generated.h @@ -22,11 +22,17 @@ void atg___xor__(tensor *, tensor self, scalar 
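// Editor's note (illustrative sketch): the new view_as_complex / view_as_real
// wrappers expose libtorch 1.7's complex-tensor views, and overloads keep
// receiving numeric suffixes (where1/where2/..., subtract1, subtract_1)
// because C has no overloading. The underlying behaviour, sketched directly
// against libtorch (example_complex_views is a hypothetical function, shapes
// are arbitrary; view_as_complex needs a trailing dimension of size 2):
static void example_complex_views() {
  torch::Tensor r  = torch::randn({4, 2});       // pairs of (real, imag)
  torch::Tensor c  = torch::view_as_complex(r);  // shape {4}, complex dtype
  torch::Tensor r2 = torch::view_as_real(c);     // back to shape {4, 2}
  (void)r2;
}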
other); void atg___xor__1(tensor *, tensor self, tensor other); void atg__adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); void atg__adaptive_avg_pool2d_backward(tensor *, tensor grad_output, tensor self); -void atg__addr(tensor *, tensor self, tensor vec1, tensor vec2); -void atg__addr_(tensor *, tensor self, tensor vec1, tensor vec2); -void atg__addr_out(tensor *, tensor out, tensor self, tensor vec1, tensor vec2); +void atg__add_batch_dim(tensor *, tensor self, int64_t batch_dim, int64_t level); +void atg__add_relu(tensor *, tensor self, tensor other); +void atg__add_relu_(tensor *, tensor self, tensor other); +void atg__add_relu_out(tensor *, tensor out, tensor self, tensor other); +void atg__addmv_impl_(tensor *, tensor self, tensor self2, tensor mat, tensor vec); +void atg__aminmax(tensor *, tensor self); +void atg__aminmax1(tensor *, tensor self, int64_t dim, int keepdim); void atg__amp_update_scale(tensor *, tensor growth_tracker, tensor current_scale, tensor found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval); void atg__baddbmm_mkl_(tensor *, tensor self, tensor batch1, tensor batch2); +void atg__bmm(tensor *, tensor self, tensor mat2, int deterministic); +void atg__bmm_out(tensor *, tensor out, tensor self, tensor mat2, int deterministic); void atg__cast_byte(tensor *, tensor self, int non_blocking); void atg__cast_char(tensor *, tensor self, int non_blocking); void atg__cast_double(tensor *, tensor self, int non_blocking); @@ -41,7 +47,11 @@ void atg__cdist_backward(tensor *, tensor grad, tensor x1, tensor x2, double p, void atg__cholesky_helper(tensor *, tensor self, int upper); void atg__cholesky_solve_helper(tensor *, tensor self, tensor A, int upper); void atg__coalesced_(tensor *, tensor self, int coalesced); +void atg__compute_linear_combination(tensor *, tensor input, tensor coefficients); +void atg__compute_linear_combination_out(tensor *, tensor out, tensor input, tensor coefficients); +void atg__conj(tensor *, tensor self); void atg__convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled); +void atg__convolution1(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups, int benchmark, int deterministic, int cudnn_enabled, int allow_tf32); void atg__convolution_nogroup(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len); void atg__copy_from(tensor *, tensor self, tensor dst, int non_blocking); void atg__ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int zero_infinity); @@ -59,28 +69,35 @@ void atg__dirichlet_grad(tensor *, tensor x, tensor alpha, tensor total); void atg__embedding_bag(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, 
int include_last_offset); void atg__embedding_bag_backward(tensor *, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights); void atg__embedding_bag_dense_backward(tensor *, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, tensor maximum_indices, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights); +void atg__embedding_bag_forward_only(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights, int include_last_offset); void atg__embedding_bag_per_sample_weights_backward(tensor *, tensor grad, tensor weight, tensor indices, tensor offsets, tensor offset2bag, int64_t mode); void atg__embedding_bag_sparse_backward(tensor *, tensor grad, tensor indices, tensor offsets, tensor offset2bag, tensor bag_size, int64_t num_weights, int scale_grad_by_freq, int64_t mode, tensor per_sample_weights); void atg__empty_affine_quantized(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device, double scale, int64_t zero_point); void atg__empty_per_channel_affine_quantized(tensor *, int64_t *size_data, int size_len, tensor scales, tensor zero_points, int64_t axis, int options_kind, int options_device); +void atg__euclidean_dist(tensor *, tensor x1, tensor x2); +void atg__fake_quantize_learnable_per_channel_affine(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); +void atg__fake_quantize_learnable_per_channel_affine_backward(tensor *, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); +void atg__fake_quantize_learnable_per_tensor_affine(tensor *, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max); +void atg__fake_quantize_learnable_per_tensor_affine_backward(tensor *, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t quant_min, int64_t quant_max); void atg__fft_with_size(tensor *, tensor self, int64_t signal_ndim, int complex_input, int complex_output, int inverse, int64_t *checked_signal_sizes_data, int checked_signal_sizes_len, int normalized, int onesided, int64_t *output_sizes_data, int output_sizes_len); +void atg__fft_with_size1(tensor *, tensor self, int64_t signal_ndim, int complex_input, int complex_output, int inverse, int64_t *checked_signal_sizes_data, int checked_signal_sizes_len, int64_t normalization, int onesided, int64_t *output_sizes_data, int output_sizes_len); void atg__fused_dropout(tensor *, tensor self, double p); void atg__gather_sparse_backward(tensor *, tensor self, int64_t dim, tensor index, tensor grad); +void atg__grid_sampler_2d_cpu_fallback(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +void atg__grid_sampler_2d_cpu_fallback_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg__index_copy_(tensor *, tensor self, int64_t dim, tensor index, tensor source); void atg__index_put_impl_(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate, int unsafe); void atg__indices(tensor *, tensor self); void atg__inverse_helper(tensor *, tensor self); void atg__log_softmax(tensor *, tensor self, int64_t dim, int half_to_float); void 
atg__log_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); +void atg__logcumsumexp(tensor *, tensor self, int64_t dim); +void atg__logcumsumexp_out(tensor *, tensor out, tensor self, int64_t dim); void atg__lu_solve_helper(tensor *, tensor self, tensor LU_data, tensor LU_pivots); void atg__lu_with_info(tensor *, tensor self, int pivot, int check_errors); void atg__make_per_channel_quantized_tensor(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis); void atg__make_per_tensor_quantized_tensor(tensor *, tensor self, double scale, int64_t zero_point); void atg__masked_scale(tensor *, tensor self, tensor mask, double scale); -void atg__max(tensor *, tensor self, int64_t dim, int keepdim); -void atg__max_out(tensor *, tensor max, tensor max_indices, tensor self, int64_t dim, int keepdim); -void atg__min(tensor *, tensor self, int64_t dim, int keepdim); -void atg__min_out(tensor *, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim); void atg__mkldnn_reshape(tensor *, tensor self, int64_t *shape_data, int shape_len); void atg__mkldnn_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1); void atg__mkldnn_transpose_(tensor *, tensor self, int64_t dim0, int64_t dim1); @@ -96,9 +113,11 @@ void atg__pack_padded_sequence_backward(tensor *, tensor grad, int64_t *input_si void atg__pad_packed_sequence(tensor *, tensor data, tensor batch_sizes, int batch_first, scalar padding_value, int64_t total_length); void atg__pdist_backward(tensor *, tensor grad, tensor self, double p, tensor pdist); void atg__qr_helper(tensor *, tensor self, int some); +void atg__remove_batch_dim(tensor *, tensor self, int64_t level, int64_t batch_size, int64_t out_dim); void atg__reshape_from_tensor(tensor *, tensor self, tensor shape); void atg__s_where(tensor *, tensor condition, tensor self, tensor other); void atg__sample_dirichlet(tensor *, tensor self); +void atg__saturate_weight_to_fp16(tensor *, tensor weight); void atg__shape_as_tensor(tensor *, tensor self); void atg__sobol_engine_draw(tensor *, tensor quasi, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated, int dtype); void atg__sobol_engine_ff_(tensor *, tensor self, int64_t n, tensor sobolstate, int64_t dimension, int64_t num_generated); @@ -111,7 +130,13 @@ void atg__sparse_addmm(tensor *, tensor self, tensor sparse, tensor dense); void atg__sparse_coo_tensor_unsafe(tensor *, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device); void atg__sparse_coo_tensor_with_dims(tensor *, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, int options_kind, int options_device); void atg__sparse_coo_tensor_with_dims_and_tensors(tensor *, int64_t sparse_dim, int64_t dense_dim, int64_t *size_data, int size_len, tensor indices, tensor values, int options_kind, int options_device); +void atg__sparse_log_softmax(tensor *, tensor self, int64_t dim, int dtype); +void atg__sparse_log_softmax1(tensor *, tensor self, int64_t dim, int half_to_float); +void atg__sparse_log_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); void atg__sparse_mm(tensor *, tensor sparse, tensor dense); +void atg__sparse_softmax(tensor *, tensor self, int64_t dim, int dtype); +void atg__sparse_softmax1(tensor *, tensor self, int64_t dim, int half_to_float); +void atg__sparse_softmax_backward_data(tensor *, tensor grad_output, tensor output, int64_t dim, tensor self); void 
atg__sparse_sum(tensor *, tensor self); void atg__sparse_sum1(tensor *, tensor self, int dtype); void atg__sparse_sum2(tensor *, tensor self, int64_t *dim_data, int dim_len); @@ -122,6 +147,9 @@ void atg__standard_gamma_grad(tensor *, tensor self, tensor output); void atg__std(tensor *, tensor self, int unbiased); void atg__svd_helper(tensor *, tensor self, int some, int compute_uv); void atg__symeig_helper(tensor *, tensor self, int eigenvectors, int upper); +void atg__test_optional_filled_intlist(tensor *, tensor values, int64_t *addends_data, int addends_len); +void atg__test_optional_intlist(tensor *, tensor values, int64_t *addends_data, int addends_len); +void atg__test_serialization_subcmul(tensor *, tensor self, tensor other); void atg__triangular_solve_helper(tensor *, tensor self, tensor A, int upper, int transpose, int unitriangular); void atg__trilinear(tensor *, tensor i1, tensor i2, tensor i3, int64_t *expand1_data, int expand1_len, int64_t *expand2_data, int expand2_len, int64_t *expand3_data, int expand3_len, int64_t *sumdim_data, int sumdim_len, int64_t unroll_dim); void atg__unique(tensor *, tensor self, int sorted, int return_inverse); @@ -136,9 +164,15 @@ void atg__weight_norm_differentiable_backward(tensor *, tensor grad_w, tensor sa void atg_abs(tensor *, tensor self); void atg_abs_(tensor *, tensor self); void atg_abs_out(tensor *, tensor out, tensor self); +void atg_absolute(tensor *, tensor self); +void atg_absolute_(tensor *, tensor self); +void atg_absolute_out(tensor *, tensor out, tensor self); void atg_acos(tensor *, tensor self); void atg_acos_(tensor *, tensor self); void atg_acos_out(tensor *, tensor out, tensor self); +void atg_acosh(tensor *, tensor self); +void atg_acosh_(tensor *, tensor self); +void atg_acosh_out(tensor *, tensor out, tensor self); void atg_adaptive_avg_pool1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); void atg_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); void atg_adaptive_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); @@ -188,6 +222,10 @@ void atg_all1(tensor *, tensor self, int64_t dim, int keepdim); void atg_all_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim); void atg_alpha_dropout(tensor *, tensor input, double p, int train); void atg_alpha_dropout_(tensor *, tensor self, double p, int train); +void atg_amax(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +void atg_amax_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); +void atg_amin(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +void atg_amin_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); void atg_angle(tensor *, tensor self); void atg_angle_out(tensor *, tensor out, tensor self); void atg_any(tensor *, tensor self); @@ -198,29 +236,59 @@ void atg_arange1(tensor *, scalar start, scalar end, int options_kind, int optio void atg_arange2(tensor *, scalar start, scalar end, scalar step, int options_kind, int options_device); void atg_arange_out(tensor *, tensor out, scalar end); void atg_arange_out1(tensor *, tensor out, scalar start, scalar end); -void atg_argmax(tensor *, tensor self, int64_t dim, int keepdim); -void atg_argmin(tensor *, tensor self, int64_t dim, int keepdim); +void atg_arccos(tensor *, tensor self); +void atg_arccos_(tensor *, tensor self); +void atg_arccos_out(tensor *, tensor out, tensor self); +void 
atg_arccosh(tensor *, tensor self); +void atg_arccosh_(tensor *, tensor self); +void atg_arccosh_out(tensor *, tensor out, tensor self); +void atg_arcsin(tensor *, tensor self); +void atg_arcsin_(tensor *, tensor self); +void atg_arcsin_out(tensor *, tensor out, tensor self); +void atg_arcsinh(tensor *, tensor self); +void atg_arcsinh_(tensor *, tensor self); +void atg_arcsinh_out(tensor *, tensor out, tensor self); +void atg_arctan(tensor *, tensor self); +void atg_arctan_(tensor *, tensor self); +void atg_arctan_out(tensor *, tensor out, tensor self); +void atg_arctanh(tensor *, tensor self); +void atg_arctanh_(tensor *, tensor self); +void atg_arctanh_out(tensor *, tensor out, tensor self); +void atg_argmax(tensor *, tensor self, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_argmin(tensor *, tensor self, int64_t dim_v, uint8_t dim_null, int keepdim); void atg_argsort(tensor *, tensor self, int64_t dim, int descending); -void atg_as_strided(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset); -void atg_as_strided_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset); +void atg_as_strided(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset_v, uint8_t storage_offset_null); +void atg_as_strided_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset_v, uint8_t storage_offset_null); void atg_asin(tensor *, tensor self); void atg_asin_(tensor *, tensor self); void atg_asin_out(tensor *, tensor out, tensor self); +void atg_asinh(tensor *, tensor self); +void atg_asinh_(tensor *, tensor self); +void atg_asinh_out(tensor *, tensor out, tensor self); void atg_atan(tensor *, tensor self); void atg_atan2(tensor *, tensor self, tensor other); void atg_atan2_(tensor *, tensor self, tensor other); void atg_atan2_out(tensor *, tensor out, tensor self, tensor other); void atg_atan_(tensor *, tensor self); void atg_atan_out(tensor *, tensor out, tensor self); +void atg_atanh(tensor *, tensor self); +void atg_atanh_(tensor *, tensor self); +void atg_atanh_out(tensor *, tensor out, tensor self); +void atg_atleast_1d(tensor *, tensor self); +tensor *atg_atleast_1d1(tensor *tensors_data, int tensors_len); +void atg_atleast_2d(tensor *, tensor self); +tensor *atg_atleast_2d1(tensor *tensors_data, int tensors_len); +void atg_atleast_3d(tensor *, tensor self); +tensor *atg_atleast_3d1(tensor *tensors_data, int tensors_len); void atg_avg_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad); -void atg_avg_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -void atg_avg_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -void atg_avg_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int 
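// Editor's note (illustrative sketch): the header now spells optional integer
// arguments as a value plus a uint8_t null flag, e.g. argmax/argmin's dim,
// as_strided's storage_offset and avg_pool*'s divisor_override above. Passing
// "None" means setting the flag to 1 and leaving the value unused.
// example_argmax_all_dims is a hypothetical caller and the input handle is
// assumed to be valid.
static void example_argmax_all_dims(tensor self) {
  tensor out[1];
  // dim = None (reduce over all elements), keepdim = false
  atg_argmax(out, self, /*dim_v=*/0, /*dim_null=*/1, /*keepdim=*/0);
}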
ceil_mode, int count_include_pad, int64_t divisor_override); -void atg_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -void atg_avg_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -void atg_avg_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -void atg_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -void atg_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +void atg_avg_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); +void atg_avg_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); +void atg_avg_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); +void atg_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); +void atg_avg_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); +void atg_avg_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); +void atg_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); +void atg_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, 
int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override_v, uint8_t divisor_override_null); void atg_baddbmm(tensor *, tensor self, tensor batch1, tensor batch2); void atg_baddbmm_(tensor *, tensor self, tensor batch1, tensor batch2); void atg_baddbmm_out(tensor *, tensor out, tensor self, tensor batch1, tensor batch2); @@ -232,7 +300,7 @@ void atg_batch_norm_backward_reduce(tensor *, tensor grad_out, tensor input, ten void atg_batch_norm_elemt(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps); void atg_batch_norm_elemt_out(tensor *, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps); void atg_batch_norm_gather_stats(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count); -void atg_batch_norm_gather_stats_with_counts(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len); +void atg_batch_norm_gather_stats_with_counts(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, tensor counts); void atg_batch_norm_stats(tensor *, tensor input, double eps); void atg_batch_norm_update_stats(tensor *, tensor input, tensor running_mean, tensor running_var, double momentum); void atg_bernoulli(tensor *, tensor self); @@ -248,6 +316,7 @@ void atg_binary_cross_entropy_out(tensor *, tensor out, tensor self, tensor targ void atg_binary_cross_entropy_with_logits(tensor *, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction); void atg_binary_cross_entropy_with_logits_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction); void atg_bincount(tensor *, tensor self, tensor weights, int64_t minlength); +void atg_binomial(tensor *, tensor count, tensor prob); void atg_bitwise_and(tensor *, tensor self, scalar other); void atg_bitwise_and1(tensor *, tensor self, tensor other); void atg_bitwise_and_(tensor *, tensor self, scalar other); @@ -271,20 +340,25 @@ void atg_bitwise_xor_out(tensor *, tensor out, tensor self, tensor other); void atg_bitwise_xor_out1(tensor *, tensor out, tensor self, scalar other); void atg_blackman_window(tensor *, int64_t window_length, int options_kind, int options_device); void atg_blackman_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); +void atg_block_diag(tensor *, tensor *tensors_data, int tensors_len); void atg_bmm(tensor *, tensor self, tensor mat2); void atg_bmm_out(tensor *, tensor out, tensor self, tensor mat2); tensor *atg_broadcast_tensors(tensor *tensors_data, int tensors_len); +void atg_bucketize(tensor *, tensor self, tensor boundaries, int out_int32, int right); +void atg_bucketize1(tensor *, scalar self_scalar, tensor boundaries, int out_int32, int right); +void atg_bucketize_out(tensor *, tensor out, tensor self, tensor boundaries, int out_int32, int right); void atg_cartesian_prod(tensor *, tensor *tensors_data, int tensors_len); void atg_cat(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); void atg_cat_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim); void atg_cauchy_(tensor *, tensor self, double median, double sigma); -void atg_cdist(tensor *, tensor x1, tensor x2, double p, int64_t compute_mode); +void 
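// Editor's note (illustrative sketch): bucketize (declared just above) and
// searchsorted_out (added earlier in torch_api_generated.cpp.h) wrap the
// binning ops newly covered by this binding with the 1.7.0 update. Their
// behaviour, sketched directly against libtorch (example_bucketize is a
// hypothetical function, the values are arbitrary):
static void example_bucketize() {
  torch::Tensor boundaries = torch::tensor({1, 3, 5, 7, 9});
  torch::Tensor values     = torch::tensor({3, 6, 9});
  // with right=false each result is the first index whose boundary is >= value,
  // giving {1, 3, 4} here
  torch::Tensor idx = torch::bucketize(values, boundaries);
  (void)idx;
}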
atg_cdist(tensor *, tensor x1, tensor x2, double p, int64_t compute_mode_v, uint8_t compute_mode_null); void atg_ceil(tensor *, tensor self); void atg_ceil_(tensor *, tensor self); void atg_ceil_out(tensor *, tensor out, tensor self); void atg_celu(tensor *, tensor self); void atg_celu_(tensor *, tensor self); void atg_chain_matmul(tensor *, tensor *matrices_data, int matrices_len); +void atg_channel_shuffle(tensor *, tensor self, int64_t groups); void atg_cholesky(tensor *, tensor self, int upper); void atg_cholesky_inverse(tensor *, tensor self, int upper); void atg_cholesky_inverse_out(tensor *, tensor out, tensor self, int upper); @@ -301,12 +375,17 @@ void atg_clamp_min(tensor *, tensor self, scalar min); void atg_clamp_min_(tensor *, tensor self, scalar min); void atg_clamp_min_out(tensor *, tensor out, tensor self, scalar min); void atg_clamp_out(tensor *, tensor out, tensor self, scalar min, scalar max); +void atg_clip(tensor *, tensor self, scalar min, scalar max); +void atg_clip_(tensor *, tensor self, scalar min, scalar max); +void atg_clip_out(tensor *, tensor out, tensor self, scalar min, scalar max); void atg_coalesce(tensor *, tensor self); void atg_col2im(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); void atg_col2im_backward(tensor *, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); void atg_col2im_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); void atg_col2im_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); void atg_combinations(tensor *, tensor self, int64_t r, int with_replacement); +void atg_complex(tensor *, tensor real, tensor imag); +void atg_complex_out(tensor *, tensor out, tensor real, tensor imag); void atg_conj(tensor *, tensor self); void atg_conj_out(tensor *, tensor out, tensor self); void atg_constant_pad_nd(tensor *, tensor self, int64_t *pad_data, int pad_len); @@ -330,8 +409,10 @@ void atg_cosh_(tensor *, tensor self); void atg_cosh_out(tensor *, tensor out, tensor self); void atg_cosine_embedding_loss(tensor *, tensor input1, tensor input2, tensor target, double margin, int64_t reduction); void atg_cosine_similarity(tensor *, tensor x1, tensor x2, int64_t dim, double eps); -void atg_cross(tensor *, tensor self, tensor other, int64_t dim); -void atg_cross_out(tensor *, tensor out, tensor self, tensor other, int64_t dim); +void atg_count_nonzero(tensor *, tensor self, int64_t *dim_data, int dim_len); +void atg_count_nonzero1(tensor *, tensor self, int64_t dim_v, uint8_t dim_null); +void atg_cross(tensor *, tensor self, tensor other, int64_t dim_v, uint8_t dim_null); +void atg_cross_out(tensor *, tensor out, tensor self, tensor other, int64_t dim_v, uint8_t dim_null); void atg_ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t 
blank, int64_t reduction, int zero_infinity); void atg_ctc_loss1(tensor *, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity); void atg_cudnn_affine_grid_generator(tensor *, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W); @@ -340,32 +421,42 @@ void atg_cudnn_batch_norm(tensor *, tensor input, tensor weight, tensor bias, te void atg_cudnn_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace); void atg_cudnn_convolution(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); void atg_cudnn_convolution1(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_cudnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_cudnn_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +void atg_cudnn_convolution2(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); +void atg_cudnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); +void atg_cudnn_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); void atg_cudnn_convolution_transpose(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); void atg_cudnn_convolution_transpose1(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void atg_cudnn_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -void 
atg_cudnn_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +void atg_cudnn_convolution_transpose2(tensor *, tensor self, tensor weight, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); +void atg_cudnn_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); +void atg_cudnn_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic, int allow_tf32); void atg_cudnn_grid_sampler(tensor *, tensor self, tensor grid); void atg_cudnn_grid_sampler_backward(tensor *, tensor self, tensor grid, tensor grad_output); void atg_cummax(tensor *, tensor self, int64_t dim); void atg_cummax_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim); +void atg_cummaxmin_backward(tensor *, tensor grad, tensor input, tensor indices, int64_t dim); void atg_cummin(tensor *, tensor self, int64_t dim); void atg_cummin_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim); void atg_cumprod(tensor *, tensor self, int64_t dim, int dtype); +void atg_cumprod_backward(tensor *, tensor grad, tensor input, int64_t dim); void atg_cumprod_out(tensor *, tensor out, tensor self, int64_t dim, int dtype); void atg_cumsum(tensor *, tensor self, int64_t dim, int dtype); void atg_cumsum_out(tensor *, tensor out, tensor self, int64_t dim, int dtype); void atg_data(tensor *, tensor self); +void atg_deg2rad(tensor *, tensor self); +void atg_deg2rad_(tensor *, tensor self); +void atg_deg2rad_out(tensor *, tensor out, tensor self); void atg_dequantize(tensor *, tensor self); +tensor *atg_dequantize1(tensor *tensors_data, int tensors_len); void atg_det(tensor *, tensor self); void atg_detach(tensor *, tensor self); void atg_detach_(tensor *, tensor self); void atg_diag(tensor *, tensor self, int64_t diagonal); +void atg_diag_backward(tensor *, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t diagonal); void atg_diag_embed(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2); void atg_diag_out(tensor *, tensor out, tensor self, int64_t diagonal); void atg_diagflat(tensor *, tensor self, int64_t offset); void atg_diagonal(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2); +void atg_diagonal_backward(tensor *, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t offset, int64_t dim1, int64_t dim2); void atg_digamma(tensor *, tensor self); void atg_digamma_(tensor *, tensor self); void atg_digamma_out(tensor *, tensor out, tensor self); @@ -375,10 +466,17 @@ void atg_div1(tensor *, tensor self, scalar other); void atg_div_(tensor *, tensor self, tensor other); void atg_div_1(tensor *, tensor self, scalar other); void atg_div_out(tensor *, tensor out, tensor self, tensor 
other); +void atg_divide(tensor *, tensor self, tensor other); +void atg_divide1(tensor *, tensor self, scalar other); +void atg_divide_(tensor *, tensor self, tensor other); +void atg_divide_1(tensor *, tensor self, scalar other); +void atg_divide_out(tensor *, tensor out, tensor self, tensor other); void atg_dot(tensor *, tensor self, tensor tensor); void atg_dot_out(tensor *, tensor out, tensor self, tensor tensor); void atg_dropout(tensor *, tensor input, double p, int train); void atg_dropout_(tensor *, tensor self, double p, int train); +void atg_dstack(tensor *, tensor *tensors_data, int tensors_len); +void atg_dstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len); void atg_eig(tensor *, tensor self, int eigenvectors); void atg_eig_out(tensor *, tensor e, tensor v, tensor self, int eigenvectors); void atg_einsum(tensor *, char* equation_ptr, int equation_len, tensor *tensors_data, int tensors_len); @@ -395,7 +493,9 @@ void atg_embedding_renorm_(tensor *, tensor self, tensor indices, double max_nor void atg_embedding_sparse_backward(tensor *, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq); void atg_empty(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); void atg_empty_like(tensor *, tensor self); +void atg_empty_meta(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); void atg_empty_out(tensor *, tensor out, int64_t *size_data, int size_len); +void atg_empty_quantized(tensor *, int64_t *size_data, int size_len, tensor qtensor); void atg_empty_strided(tensor *, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device); void atg_eq(tensor *, tensor self, scalar other); void atg_eq1(tensor *, tensor self, tensor other); @@ -413,6 +513,9 @@ void atg_erfinv(tensor *, tensor self); void atg_erfinv_(tensor *, tensor self); void atg_erfinv_out(tensor *, tensor out, tensor self); void atg_exp(tensor *, tensor self); +void atg_exp2(tensor *, tensor self); +void atg_exp2_(tensor *, tensor self); +void atg_exp2_out(tensor *, tensor out, tensor self); void atg_exp_(tensor *, tensor self); void atg_exp_out(tensor *, tensor out, tensor self); void atg_expand(tensor *, tensor self, int64_t *size_data, int size_len, int implicit); @@ -441,11 +544,26 @@ void atg_feature_alpha_dropout_(tensor *, tensor self, double p, int train); void atg_feature_dropout(tensor *, tensor input, double p, int train); void atg_feature_dropout_(tensor *, tensor self, double p, int train); void atg_fft(tensor *, tensor self, int64_t signal_ndim, int normalized); +void atg_fft_fft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_fftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); +void atg_fft_hfft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_ifft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_ifftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); +void atg_fft_ihfft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_irfft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_irfftn(tensor *, tensor self, int64_t 
*s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); +void atg_fft_rfft(tensor *, tensor self, int64_t n_v, uint8_t n_null, int64_t dim, char* norm_ptr, int norm_len); +void atg_fft_rfftn(tensor *, tensor self, int64_t *s_data, int s_len, int64_t *dim_data, int dim_len, char* norm_ptr, int norm_len); void atg_fill_(tensor *, tensor self, scalar value); void atg_fill_1(tensor *, tensor self, tensor value); void atg_fill_diagonal_(tensor *, tensor self, scalar fill_value, int wrap); +void atg_fix(tensor *, tensor self); +void atg_fix_(tensor *, tensor self); +void atg_fix_out(tensor *, tensor out, tensor self); void atg_flatten(tensor *, tensor self, int64_t start_dim, int64_t end_dim); void atg_flip(tensor *, tensor self, int64_t *dims_data, int dims_len); +void atg_fliplr(tensor *, tensor self); +void atg_flipud(tensor *, tensor self); void atg_floor(tensor *, tensor self); void atg_floor_(tensor *, tensor self); void atg_floor_divide(tensor *, tensor self, tensor other); @@ -474,12 +592,16 @@ void atg_fractional_max_pool3d_out(tensor *, tensor output, tensor indices, tens void atg_frobenius_norm(tensor *, tensor self); void atg_frobenius_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); void atg_frobenius_norm_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); -void atg_from_file(tensor *, char* filename_ptr, int filename_len, int shared, int64_t size, int options_kind, int options_device); +void atg_from_file(tensor *, char* filename_ptr, int filename_len, int shared, int64_t size_v, uint8_t size_null, int options_kind, int options_device); void atg_full(tensor *, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device); void atg_full_like(tensor *, tensor self, scalar fill_value); void atg_full_out(tensor *, tensor out, int64_t *size_data, int size_len, scalar fill_value); void atg_gather(tensor *, tensor self, int64_t dim, tensor index, int sparse_grad); +void atg_gather_backward(tensor *, tensor grad, tensor self, int64_t dim, tensor index, int sparse_grad); void atg_gather_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad); +void atg_gcd(tensor *, tensor self, tensor other); +void atg_gcd_(tensor *, tensor self, tensor other); +void atg_gcd_out(tensor *, tensor out, tensor self, tensor other); void atg_ge(tensor *, tensor self, scalar other); void atg_ge1(tensor *, tensor self, tensor other); void atg_ge_(tensor *, tensor self, scalar other); @@ -498,6 +620,18 @@ void atg_glu_backward(tensor *, tensor grad_output, tensor self, int64_t dim); void atg_glu_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t dim); void atg_glu_out(tensor *, tensor out, tensor self, int64_t dim); void atg_grad(tensor *, tensor self); +void atg_greater(tensor *, tensor self, scalar other); +void atg_greater1(tensor *, tensor self, tensor other); +void atg_greater_(tensor *, tensor self, scalar other); +void atg_greater_1(tensor *, tensor self, tensor other); +void atg_greater_equal(tensor *, tensor self, scalar other); +void atg_greater_equal1(tensor *, tensor self, tensor other); +void atg_greater_equal_(tensor *, tensor self, scalar other); +void atg_greater_equal_1(tensor *, tensor self, tensor other); +void atg_greater_equal_out(tensor *, tensor out, tensor self, scalar other); +void atg_greater_equal_out1(tensor *, tensor out, tensor self, tensor other); +void atg_greater_out(tensor *, tensor out, tensor self, scalar 
other); +void atg_greater_out1(tensor *, tensor out, tensor self, tensor other); void atg_grid_sampler(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg_grid_sampler_2d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); void atg_grid_sampler_2d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); @@ -525,16 +659,31 @@ void atg_hardsigmoid(tensor *, tensor self); void atg_hardsigmoid_(tensor *, tensor self); void atg_hardsigmoid_backward(tensor *, tensor grad_output, tensor self); void atg_hardsigmoid_out(tensor *, tensor out, tensor self); +void atg_hardswish(tensor *, tensor self); +void atg_hardswish_(tensor *, tensor self); +void atg_hardswish_backward(tensor *, tensor grad_output, tensor self); +void atg_hardswish_out(tensor *, tensor out, tensor self); void atg_hardtanh(tensor *, tensor self); void atg_hardtanh_(tensor *, tensor self); void atg_hardtanh_backward(tensor *, tensor grad_output, tensor self, scalar min_val, scalar max_val); void atg_hardtanh_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val); void atg_hardtanh_out(tensor *, tensor out, tensor self); +void atg_heaviside(tensor *, tensor self, tensor values); +void atg_heaviside_(tensor *, tensor self, tensor values); +void atg_heaviside_out(tensor *, tensor out, tensor self, tensor values); void atg_hinge_embedding_loss(tensor *, tensor self, tensor target, double margin, int64_t reduction); void atg_histc(tensor *, tensor self, int64_t bins); void atg_histc_out(tensor *, tensor out, tensor self, int64_t bins); void atg_hspmm(tensor *, tensor mat1, tensor mat2); void atg_hspmm_out(tensor *, tensor out, tensor mat1, tensor mat2); +void atg_hstack(tensor *, tensor *tensors_data, int tensors_len); +void atg_hstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len); +void atg_hypot(tensor *, tensor self, tensor other); +void atg_hypot_(tensor *, tensor self, tensor other); +void atg_hypot_out(tensor *, tensor out, tensor self, tensor other); +void atg_i0(tensor *, tensor self); +void atg_i0_(tensor *, tensor self); +void atg_i0_out(tensor *, tensor out, tensor self); void atg_ifft(tensor *, tensor self, int64_t signal_ndim, int normalized); void atg_im2col(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); void atg_im2col_backward(tensor *, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); @@ -553,8 +702,10 @@ void atg_index_fill_1(tensor *, tensor self, int64_t dim, tensor index, tensor v void atg_index_put(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate); void atg_index_put_(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate); void atg_index_select(tensor *, tensor self, int64_t dim, tensor index); +void atg_index_select_backward(tensor *, tensor grad, int64_t *self_sizes_data, int self_sizes_len, int64_t dim, tensor index); void atg_index_select_out(tensor *, tensor out, tensor self, int64_t dim, tensor index); void atg_indices(tensor *, tensor self); 
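The regenerated declarations in this hunk switch every nullable scalar argument to a value/flag pair: an `int64_t ..._v` (or `double ..._v`) carrying the value plus a `uint8_t ..._null` that is non-zero when the argument is "None" (for example `divisor_override_v`/`divisor_override_null` in the `atg_avg_pool*` family, or `n_v`/`n_null` in the new `atg_fft_*` functions). A minimal Go sketch of how a binding might build such a pair from an optional value follows; the helper name is illustrative and is not part of gotch.

```go
package main

import "fmt"

// int64ToCOpt is a hypothetical helper: it turns an optional Go int64
// (nil meaning "None") into the (value, null-flag) pair that the
// regenerated atg_* functions expect for nullable int64_t arguments.
func int64ToCOpt(v *int64) (cval int64, cnull uint8) {
	if v == nil {
		// The value is ignored by the C side when the null flag is set.
		return 0, 1
	}
	return *v, 0
}

func main() {
	var divisorOverride *int64 // nil means no override is supplied
	v, null := int64ToCOpt(divisorOverride)
	fmt.Println(v, null) // 0 1

	seven := int64(7)
	v, null = int64ToCOpt(&seven)
	fmt.Println(v, null) // 7 0
}
```

The same convention covers the `double ..._v`/`uint8_t ..._null` pairs further down, such as `scales_h_v`/`scales_h_null` in the `atg_upsample_*` declarations.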
+void atg_infinitely_differentiable_gelu_backward(tensor *, tensor grad, tensor self); void atg_instance_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled); void atg_int_repr(tensor *, tensor self); void atg_inverse(tensor *, tensor self); @@ -564,8 +715,17 @@ void atg_isclose(tensor *, tensor self, tensor other, double rtol, double atol, void atg_isfinite(tensor *, tensor self); void atg_isinf(tensor *, tensor self); void atg_isnan(tensor *, tensor self); -void atg_kl_div(tensor *, tensor self, tensor target, int64_t reduction); -void atg_kl_div_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); +void atg_isneginf(tensor *, tensor self); +void atg_isneginf_out(tensor *, tensor out, tensor self); +void atg_isposinf(tensor *, tensor self); +void atg_isposinf_out(tensor *, tensor out, tensor self); +void atg_isreal(tensor *, tensor self); +void atg_istft(tensor *, tensor self, int64_t n_fft, int64_t hop_length_v, uint8_t hop_length_null, int64_t win_length_v, uint8_t win_length_null, tensor window, int center, int normalized, int onesided, int64_t length_v, uint8_t length_null, int return_complex); +void atg_kaiser_window(tensor *, int64_t window_length, int options_kind, int options_device); +void atg_kaiser_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); +void atg_kaiser_window2(tensor *, int64_t window_length, int periodic, double beta, int options_kind, int options_device); +void atg_kl_div(tensor *, tensor self, tensor target, int64_t reduction, int log_target); +void atg_kl_div_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, int log_target); void atg_kthvalue(tensor *, tensor self, int64_t k, int64_t dim, int keepdim); void atg_kthvalue_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim); void atg_l1_loss(tensor *, tensor self, tensor target, int64_t reduction); @@ -573,6 +733,9 @@ void atg_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor targ void atg_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); void atg_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); void atg_layer_norm(tensor *, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable); +void atg_lcm(tensor *, tensor self, tensor other); +void atg_lcm_(tensor *, tensor self, tensor other); +void atg_lcm_out(tensor *, tensor out, tensor self, tensor other); void atg_le(tensor *, tensor self, scalar other); void atg_le1(tensor *, tensor self, tensor other); void atg_le_(tensor *, tensor self, scalar other); @@ -589,12 +752,29 @@ void atg_lerp_(tensor *, tensor self, tensor end, scalar weight); void atg_lerp_1(tensor *, tensor self, tensor end, tensor weight); void atg_lerp_out(tensor *, tensor out, tensor self, tensor end, scalar weight); void atg_lerp_out1(tensor *, tensor out, tensor self, tensor end, tensor weight); +void atg_less(tensor *, tensor self, scalar other); +void atg_less1(tensor *, tensor self, tensor other); +void atg_less_(tensor *, tensor self, scalar other); +void atg_less_1(tensor *, tensor self, tensor other); +void atg_less_equal(tensor *, tensor self, scalar other); +void atg_less_equal1(tensor *, tensor self, tensor other); +void 
atg_less_equal_(tensor *, tensor self, scalar other); +void atg_less_equal_1(tensor *, tensor self, tensor other); +void atg_less_equal_out(tensor *, tensor out, tensor self, scalar other); +void atg_less_equal_out1(tensor *, tensor out, tensor self, tensor other); +void atg_less_out(tensor *, tensor out, tensor self, scalar other); +void atg_less_out1(tensor *, tensor out, tensor self, tensor other); void atg_lgamma(tensor *, tensor self); void atg_lgamma_(tensor *, tensor self); void atg_lgamma_out(tensor *, tensor out, tensor self); +void atg_linalg_det(tensor *, tensor self); +void atg_linalg_norm(tensor *, tensor self, scalar ord, int64_t *dim_data, int dim_len, int keepdim, int dtype); +void atg_linalg_norm1(tensor *, tensor self, char* ord_ptr, int ord_len, int64_t *dim_data, int dim_len, int keepdim, int dtype); +void atg_linalg_norm_out(tensor *, tensor out, tensor self, scalar ord, int64_t *dim_data, int dim_len, int keepdim, int dtype); +void atg_linalg_norm_out1(tensor *, tensor out, tensor self, char* ord_ptr, int ord_len, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_linear(tensor *, tensor input, tensor weight, tensor bias); -void atg_linspace(tensor *, scalar start, scalar end, int64_t steps, int options_kind, int options_device); -void atg_linspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps); +void atg_linspace(tensor *, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, int options_kind, int options_device); +void atg_linspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps_v, uint8_t steps_null); void atg_log(tensor *, tensor self); void atg_log10(tensor *, tensor self); void atg_log10_(tensor *, tensor self); @@ -613,6 +793,12 @@ void atg_log_sigmoid_backward(tensor *, tensor grad_output, tensor self, tensor void atg_log_sigmoid_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor buffer); void atg_log_sigmoid_out(tensor *, tensor out, tensor self); void atg_log_softmax(tensor *, tensor self, int64_t dim, int dtype); +void atg_logaddexp(tensor *, tensor self, tensor other); +void atg_logaddexp2(tensor *, tensor self, tensor other); +void atg_logaddexp2_out(tensor *, tensor out, tensor self, tensor other); +void atg_logaddexp_out(tensor *, tensor out, tensor self, tensor other); +void atg_logcumsumexp(tensor *, tensor self, int64_t dim); +void atg_logcumsumexp_out(tensor *, tensor out, tensor self, int64_t dim); void atg_logdet(tensor *, tensor self); void atg_logical_and(tensor *, tensor self, tensor other); void atg_logical_and_(tensor *, tensor self, tensor other); @@ -626,8 +812,13 @@ void atg_logical_or_out(tensor *, tensor out, tensor self, tensor other); void atg_logical_xor(tensor *, tensor self, tensor other); void atg_logical_xor_(tensor *, tensor self, tensor other); void atg_logical_xor_out(tensor *, tensor out, tensor self, tensor other); -void atg_logspace(tensor *, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device); -void atg_logspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps, double base); +void atg_logit(tensor *, tensor self, double eps_v, uint8_t eps_null); +void atg_logit_(tensor *, tensor self, double eps_v, uint8_t eps_null); +void atg_logit_backward(tensor *, tensor grad_output, tensor self, double eps_v, uint8_t eps_null); +void atg_logit_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, double eps_v, uint8_t eps_null); +void atg_logit_out(tensor *, tensor 
out, tensor self, double eps_v, uint8_t eps_null); +void atg_logspace(tensor *, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base, int options_kind, int options_device); +void atg_logspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps_v, uint8_t steps_null, double base); void atg_logsumexp(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); void atg_logsumexp_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); void atg_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); @@ -651,9 +842,12 @@ void atg_masked_fill_1(tensor *, tensor self, tensor mask, tensor value); void atg_masked_scatter(tensor *, tensor self, tensor mask, tensor source); void atg_masked_scatter_(tensor *, tensor self, tensor mask, tensor source); void atg_masked_select(tensor *, tensor self, tensor mask); +void atg_masked_select_backward(tensor *, tensor grad, tensor input, tensor mask); void atg_masked_select_out(tensor *, tensor out, tensor self, tensor mask); void atg_matmul(tensor *, tensor self, tensor other); void atg_matmul_out(tensor *, tensor out, tensor self, tensor other); +void atg_matrix_exp(tensor *, tensor self); +void atg_matrix_exp_backward(tensor *, tensor self, tensor grad); void atg_matrix_power(tensor *, tensor self, int64_t n); void atg_matrix_rank(tensor *, tensor self, int symmetric); void atg_matrix_rank1(tensor *, tensor self, double tol, int symmetric); @@ -682,7 +876,8 @@ void atg_max_unpool3d(tensor *, tensor self, tensor indices, int64_t *output_siz void atg_max_unpool3d_backward(tensor *, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); void atg_max_unpool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); void atg_max_unpool3d_out(tensor *, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); -void atg_max_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +void atg_maximum(tensor *, tensor self, tensor other); +void atg_maximum_out(tensor *, tensor out, tensor self, tensor other); void atg_mean(tensor *, tensor self, int dtype); void atg_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_mean_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); @@ -695,7 +890,8 @@ void atg_min1(tensor *, tensor self, tensor other); void atg_min2(tensor *, tensor self, int64_t dim, int keepdim); void atg_min_out(tensor *, tensor out, tensor self, tensor other); void atg_min_out1(tensor *, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim); -void atg_min_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +void atg_minimum(tensor *, tensor self, tensor other); +void atg_minimum_out(tensor *, tensor out, tensor self, tensor other); void atg_miopen_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon); void 
atg_miopen_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon); void atg_miopen_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); @@ -715,11 +911,15 @@ void atg_mkldnn_convolution_backward_input(tensor *, int64_t *self_size_data, in void atg_mkldnn_convolution_backward_weights(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); void atg_mkldnn_linear(tensor *, tensor input, tensor weight, tensor bias); void atg_mkldnn_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +void atg_mkldnn_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); void atg_mkldnn_reorder_conv2d_weight(tensor *, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); +void atg_mkldnn_reorder_conv3d_weight(tensor *, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); void atg_mm(tensor *, tensor self, tensor mat2); void atg_mm_out(tensor *, tensor out, tensor self, tensor mat2); void atg_mode(tensor *, tensor self, int64_t dim, int keepdim); void atg_mode_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); +void atg_movedim(tensor *, tensor self, int64_t *source_data, int source_len, int64_t *destination_data, int destination_len); +void atg_movedim1(tensor *, tensor self, int64_t source, int64_t destination); void atg_mse_loss(tensor *, tensor self, tensor target, int64_t reduction); void atg_mse_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); void atg_mse_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); @@ -737,17 +937,31 @@ void atg_multilabel_margin_loss_backward_out(tensor *, tensor grad_input, tensor void atg_multilabel_margin_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); void atg_multinomial(tensor *, tensor self, int64_t num_samples, int replacement); void atg_multinomial_out(tensor *, tensor out, tensor self, int64_t num_samples, int replacement); +void atg_multiply(tensor *, tensor self, tensor other); +void atg_multiply1(tensor *, tensor self, scalar other); +void atg_multiply_(tensor *, tensor self, tensor other); +void atg_multiply_1(tensor *, tensor self, scalar other); +void atg_multiply_out(tensor *, tensor out, tensor self, tensor other); void atg_mv(tensor *, tensor self, tensor vec); void atg_mv_out(tensor *, tensor out, tensor self, tensor vec); void atg_mvlgamma(tensor *, tensor self, int64_t p); void atg_mvlgamma_(tensor *, tensor self, int64_t p); +void atg_nanquantile(tensor *, tensor self, double q, int64_t dim_v, 
uint8_t dim_null, int keepdim); +void atg_nanquantile1(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_nanquantile_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_nanquantile_out1(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_nansum(tensor *, tensor self, int dtype); +void atg_nansum1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); +void atg_nansum_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_narrow(tensor *, tensor self, int64_t dim, int64_t start, int64_t length); void atg_narrow1(tensor *, tensor self, int64_t dim, tensor start, int64_t length); void atg_narrow_copy(tensor *, tensor self, int64_t dim, int64_t start, int64_t length); void atg_native_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps); void atg_native_batch_norm_out(tensor *, tensor out, tensor save_mean, tensor save_invstd, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps); +void atg_native_group_norm(tensor *, tensor input, tensor weight, tensor bias, int64_t n, int64_t C, int64_t HxW, int64_t group, double eps); void atg_native_layer_norm(tensor *, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps); void atg_native_norm(tensor *, tensor self); +void atg_native_norm1(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_ne(tensor *, tensor self, scalar other); void atg_ne1(tensor *, tensor self, tensor other); void atg_ne_(tensor *, tensor self, scalar other); @@ -757,9 +971,15 @@ void atg_ne_out1(tensor *, tensor out, tensor self, tensor other); void atg_neg(tensor *, tensor self); void atg_neg_(tensor *, tensor self); void atg_neg_out(tensor *, tensor out, tensor self); +void atg_negative(tensor *, tensor self); +void atg_negative_(tensor *, tensor self); +void atg_negative_out(tensor *, tensor out, tensor self); void atg_new_empty(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device); void atg_new_full(tensor *, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device); void atg_new_zeros(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device); +void atg_nextafter(tensor *, tensor self, tensor other); +void atg_nextafter_(tensor *, tensor self, tensor other); +void atg_nextafter_out(tensor *, tensor out, tensor self, tensor other); void atg_nll_loss(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); void atg_nll_loss2d(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); void atg_nll_loss2d_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); @@ -783,6 +1003,12 @@ void atg_normal_out(tensor *, tensor out, tensor mean, double std); void atg_normal_out1(tensor *, tensor out, double mean, tensor std); void atg_normal_out2(tensor *, tensor out, tensor mean, tensor std); void atg_normal_out3(tensor *, tensor out, double mean, double std, int64_t *size_data, int size_len); +void atg_not_equal(tensor *, tensor self, scalar other); +void 
atg_not_equal1(tensor *, tensor self, tensor other); +void atg_not_equal_(tensor *, tensor self, scalar other); +void atg_not_equal_1(tensor *, tensor self, tensor other); +void atg_not_equal_out(tensor *, tensor out, tensor self, scalar other); +void atg_not_equal_out1(tensor *, tensor out, tensor self, tensor other); void atg_nuclear_norm(tensor *, tensor self, int keepdim); void atg_nuclear_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); void atg_nuclear_norm_out(tensor *, tensor out, tensor self, int keepdim); @@ -796,6 +1022,8 @@ void atg_orgqr(tensor *, tensor self, tensor input2); void atg_orgqr_out(tensor *, tensor out, tensor self, tensor input2); void atg_ormqr(tensor *, tensor self, tensor input2, tensor input3, int left, int transpose); void atg_ormqr_out(tensor *, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose); +void atg_outer(tensor *, tensor self, tensor vec2); +void atg_outer_out(tensor *, tensor out, tensor self, tensor vec2); void atg_pairwise_distance(tensor *, tensor x1, tensor x2, double p, double eps, int keepdim); void atg_pdist(tensor *, tensor self, double p); void atg_permute(tensor *, tensor self, int64_t *dims_data, int dims_len); @@ -804,6 +1032,8 @@ void atg_pinverse(tensor *, tensor self, double rcond); void atg_pixel_shuffle(tensor *, tensor self, int64_t upscale_factor); void atg_poisson(tensor *, tensor self); void atg_poisson_nll_loss(tensor *, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction); +void atg_polar(tensor *, tensor abs, tensor angle); +void atg_polar_out(tensor *, tensor out, tensor abs, tensor angle); void atg_polygamma(tensor *, int64_t n, tensor self); void atg_polygamma_(tensor *, tensor self, int64_t n); void atg_polygamma_out(tensor *, tensor out, int64_t n, tensor self); @@ -825,18 +1055,23 @@ void atg_q_per_channel_scales(tensor *, tensor self); void atg_q_per_channel_zero_points(tensor *, tensor self); void atg_qr(tensor *, tensor self, int some); void atg_qr_out(tensor *, tensor Q, tensor R, tensor self, int some); +void atg_quantile(tensor *, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_quantile1(tensor *, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_quantile_out(tensor *, tensor out, tensor self, double q, int64_t dim_v, uint8_t dim_null, int keepdim); +void atg_quantile_out1(tensor *, tensor out, tensor self, tensor q, int64_t dim_v, uint8_t dim_null, int keepdim); void atg_quantize_per_channel(tensor *, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype); void atg_quantize_per_tensor(tensor *, tensor self, double scale, int64_t zero_point, int dtype); +tensor *atg_quantize_per_tensor1(tensor *tensors_data, int tensors_len, tensor scales, tensor zero_points, int dtype); void atg_quantized_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor var, double eps, double output_scale, int64_t output_zero_point); -void atg_quantized_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); -void atg_quantized_gru1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); void atg_quantized_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor 
packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh); -void atg_quantized_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic); -void atg_quantized_lstm1(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic); void atg_quantized_lstm_cell(tensor *, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh); +void atg_quantized_max_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); void atg_quantized_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); void atg_quantized_rnn_relu_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh); void atg_quantized_rnn_tanh_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh); +void atg_rad2deg(tensor *, tensor self); +void atg_rad2deg_(tensor *, tensor self); +void atg_rad2deg_out(tensor *, tensor out, tensor self); void atg_rand(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); void atg_rand_like(tensor *, tensor self); void atg_rand_out(tensor *, tensor out, int64_t *size_data, int size_len); @@ -851,7 +1086,7 @@ void atg_randn_like(tensor *, tensor self); void atg_randn_out(tensor *, tensor out, int64_t *size_data, int size_len); void atg_random_(tensor *, tensor self); void atg_random_1(tensor *, tensor self, int64_t to); -void atg_random_2(tensor *, tensor self, int64_t from, int64_t to); +void atg_random_2(tensor *, tensor self, int64_t from, int64_t to_v, uint8_t to_null); void atg_randperm(tensor *, int64_t n, int options_kind, int options_device); void atg_randperm_out(tensor *, tensor out, int64_t n); void atg_range(tensor *, scalar start, scalar end, int options_kind, int options_device); @@ -882,8 +1117,8 @@ void atg_renorm_(tensor *, tensor self, scalar p, int64_t dim, scalar maxnorm); void atg_renorm_out(tensor *, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm); void atg_repeat(tensor *, tensor self, int64_t *repeats_data, int repeats_len); void atg_repeat_interleave(tensor *, tensor repeats); -void atg_repeat_interleave1(tensor *, tensor self, tensor repeats, int64_t dim); -void atg_repeat_interleave2(tensor *, tensor self, int64_t repeats, int64_t dim); +void atg_repeat_interleave1(tensor *, tensor self, tensor repeats, int64_t dim_v, uint8_t dim_null); +void 
atg_repeat_interleave2(tensor *, tensor self, int64_t repeats, int64_t dim_v, uint8_t dim_null); void atg_replication_pad1d(tensor *, tensor self, int64_t *padding_data, int padding_len); void atg_replication_pad1d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len); void atg_replication_pad1d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len); @@ -896,7 +1131,7 @@ void atg_replication_pad3d(tensor *, tensor self, int64_t *padding_data, int pad void atg_replication_pad3d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len); void atg_replication_pad3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len); void atg_replication_pad3d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len); -void atg_requires_grad_(tensor *, tensor self, int _requires_grad); +void atg_requires_grad_(tensor *, tensor self, int requires_grad); void atg_reshape(tensor *, tensor self, int64_t *shape_data, int shape_len); void atg_reshape_as(tensor *, tensor self, tensor other); void atg_resize_(tensor *, tensor self, int64_t *size_data, int size_len); @@ -929,14 +1164,23 @@ void atg_scatter(tensor *, tensor self, int64_t dim, tensor index, tensor src); void atg_scatter1(tensor *, tensor self, int64_t dim, tensor index, scalar value); void atg_scatter_(tensor *, tensor self, int64_t dim, tensor index, tensor src); void atg_scatter_1(tensor *, tensor self, int64_t dim, tensor index, scalar value); +void atg_scatter_2(tensor *, tensor self, int64_t dim, tensor index, tensor src, char* reduce_ptr, int reduce_len); +void atg_scatter_3(tensor *, tensor self, int64_t dim, tensor index, scalar value, char* reduce_ptr, int reduce_len); void atg_scatter_add(tensor *, tensor self, int64_t dim, tensor index, tensor src); void atg_scatter_add_(tensor *, tensor self, int64_t dim, tensor index, tensor src); +void atg_searchsorted(tensor *, tensor sorted_sequence, tensor self, int out_int32, int right); +void atg_searchsorted1(tensor *, tensor sorted_sequence, scalar self_scalar, int out_int32, int right); +void atg_searchsorted_out(tensor *, tensor out, tensor sorted_sequence, tensor self, int out_int32, int right); void atg_select(tensor *, tensor self, int64_t dim, int64_t index); +void atg_select_backward(tensor *, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t index); void atg_selu(tensor *, tensor self); void atg_selu_(tensor *, tensor self); void atg_set_(tensor *, tensor self); void atg_set_1(tensor *, tensor self, tensor source); void atg_set_requires_grad(tensor *, tensor self, int r); +void atg_sgn(tensor *, tensor self); +void atg_sgn_(tensor *, tensor self); +void atg_sgn_out(tensor *, tensor out, tensor self); void atg_sigmoid(tensor *, tensor self); void atg_sigmoid_(tensor *, tensor self); void atg_sigmoid_backward(tensor *, tensor grad_output, tensor output); @@ -945,6 +1189,12 @@ void atg_sigmoid_out(tensor *, tensor out, tensor self); void atg_sign(tensor *, tensor self); void atg_sign_(tensor *, tensor self); void atg_sign_out(tensor *, tensor out, tensor self); +void atg_signbit(tensor *, tensor self); +void atg_signbit_out(tensor *, tensor out, tensor self); +void atg_silu(tensor *, tensor self); +void atg_silu_(tensor *, tensor self); +void atg_silu_backward(tensor *, tensor grad_output, tensor self); +void atg_silu_out(tensor *, tensor out, tensor 
self); void atg_sin(tensor *, tensor self); void atg_sin_(tensor *, tensor self); void atg_sin_out(tensor *, tensor out, tensor self); @@ -952,6 +1202,7 @@ void atg_sinh(tensor *, tensor self); void atg_sinh_(tensor *, tensor self); void atg_sinh_out(tensor *, tensor out, tensor self); void atg_slice(tensor *, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step); +void atg_slice_backward(tensor *, tensor grad, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t start, int64_t end, int64_t step); void atg_slogdet(tensor *, tensor self); void atg_slow_conv3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); void atg_slow_conv3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); @@ -962,10 +1213,10 @@ void atg_slow_conv_transpose2d_out(tensor *, tensor out, tensor self, tensor wei void atg_slow_conv_transpose3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len); void atg_slow_conv_transpose3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len); void atg_smm(tensor *, tensor self, tensor mat2); -void atg_smooth_l1_loss(tensor *, tensor self, tensor target, int64_t reduction); -void atg_smooth_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); -void atg_smooth_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); -void atg_smooth_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); +void atg_smooth_l1_loss(tensor *, tensor self, tensor target, int64_t reduction, double beta); +void atg_smooth_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta); +void atg_smooth_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, double beta); +void atg_smooth_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction, double beta); void atg_soft_margin_loss(tensor *, tensor self, tensor target, int64_t reduction); void atg_soft_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); void atg_soft_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); @@ -1009,12 +1260,17 @@ void atg_std1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiase void atg_std_mean(tensor *, tensor self, int unbiased); void atg_std_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); void atg_std_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); -void atg_stft(tensor *, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided); +void atg_stft(tensor *, 
tensor self, int64_t n_fft, int64_t hop_length_v, uint8_t hop_length_null, int64_t win_length_v, uint8_t win_length_null, tensor window, int normalized, int onesided, int return_complex); void atg_sub(tensor *, tensor self, tensor other); void atg_sub1(tensor *, tensor self, scalar other); void atg_sub_(tensor *, tensor self, tensor other); void atg_sub_1(tensor *, tensor self, scalar other); void atg_sub_out(tensor *, tensor out, tensor self, tensor other); +void atg_subtract(tensor *, tensor self, tensor other); +void atg_subtract1(tensor *, tensor self, scalar other); +void atg_subtract_(tensor *, tensor self, tensor other); +void atg_subtract_1(tensor *, tensor self, scalar other); +void atg_subtract_out(tensor *, tensor out, tensor self, tensor other); void atg_sum(tensor *, tensor self, int dtype); void atg_sum1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); void atg_sum_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); @@ -1026,6 +1282,7 @@ void atg_symeig_out(tensor *, tensor e, tensor V, tensor self, int eigenvectors, void atg_t(tensor *, tensor self); void atg_t_(tensor *, tensor self); void atg_take(tensor *, tensor self, tensor index); +void atg_take_backward(tensor *, tensor grad, tensor input, tensor index); void atg_take_out(tensor *, tensor out, tensor self, tensor index); void atg_tan(tensor *, tensor self); void atg_tan_(tensor *, tensor self); @@ -1055,6 +1312,7 @@ void atg_topk(tensor *, tensor self, int64_t k, int64_t dim, int largest, int so void atg_topk_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted); void atg_totype(tensor *, tensor self, int scalar_type); void atg_trace(tensor *, tensor self); +void atg_trace_backward(tensor *, tensor grad, int64_t *sizes_data, int sizes_len); void atg_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1); void atg_transpose_(tensor *, tensor self, int64_t dim0, int64_t dim1); void atg_trapz(tensor *, tensor y, tensor x, int64_t dim); @@ -1080,51 +1338,67 @@ void atg_trunc_(tensor *, tensor self); void atg_trunc_out(tensor *, tensor out, tensor self); void atg_type_as(tensor *, tensor self, tensor other); tensor *atg_unbind(tensor self, int64_t dim); +void atg_unflatten(tensor *, tensor self, int64_t dim, int64_t *sizes_data, int sizes_len); void atg_unfold(tensor *, tensor self, int64_t dimension, int64_t size, int64_t step); +void atg_unfold_backward(tensor *, tensor grad_in, int64_t *input_sizes_data, int input_sizes_len, int64_t dim, int64_t size, int64_t step); void atg_uniform_(tensor *, tensor self, double from, double to); -void atg_unique_consecutive(tensor *, tensor self, int return_inverse, int return_counts, int64_t dim); +void atg_unique_consecutive(tensor *, tensor self, int return_inverse, int return_counts, int64_t dim_v, uint8_t dim_null); void atg_unique_dim(tensor *, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts); void atg_unique_dim_consecutive(tensor *, tensor self, int64_t dim, int return_inverse, int return_counts); +tensor *atg_unsafe_chunk(tensor self, int64_t chunks, int64_t dim); +tensor *atg_unsafe_split(tensor self, int64_t split_size, int64_t dim); +tensor *atg_unsafe_split_with_sizes(tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim); void atg_unsqueeze(tensor *, tensor self, int64_t dim); void atg_unsqueeze_(tensor *, tensor self, int64_t dim); -void atg_upsample_bicubic2d(tensor *, tensor self, int64_t 
*output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_bicubic2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_bicubic2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_bicubic2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_bilinear2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_bilinear2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_bilinear2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_bilinear2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h, double scales_w); -void atg_upsample_linear1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales); -void atg_upsample_linear1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales); -void atg_upsample_linear1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales); -void atg_upsample_linear1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales); -void atg_upsample_nearest1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales); -void atg_upsample_nearest1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales); -void atg_upsample_nearest1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales); -void atg_upsample_nearest1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales); -void atg_upsample_nearest2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w); -void atg_upsample_nearest2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w); -void atg_upsample_nearest2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h, double scales_w); -void atg_upsample_nearest2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h, double scales_w); -void 
atg_upsample_nearest3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w); -void atg_upsample_nearest3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w); -void atg_upsample_nearest3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d, double scales_h, double scales_w); -void atg_upsample_nearest3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d, double scales_h, double scales_w); -void atg_upsample_trilinear3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w); -void atg_upsample_trilinear3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w); -void atg_upsample_trilinear3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d, double scales_h, double scales_w); -void atg_upsample_trilinear3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d, double scales_h, double scales_w); +void atg_upsample_bicubic2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_bicubic2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_bicubic2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_bicubic2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_bilinear2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_bilinear2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_bilinear2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_bilinear2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_h_v, uint8_t scales_h_null, double 
scales_w_v, uint8_t scales_w_null); +void atg_upsample_linear1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_v, uint8_t scales_null); +void atg_upsample_linear1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_v, uint8_t scales_null); +void atg_upsample_linear1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_v, uint8_t scales_null); +void atg_upsample_linear1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_v, uint8_t scales_null); +void atg_upsample_nearest1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null); +void atg_upsample_nearest1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null); +void atg_upsample_nearest1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_v, uint8_t scales_null); +void atg_upsample_nearest1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_v, uint8_t scales_null); +void atg_upsample_nearest2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_nearest2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_nearest2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_nearest2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_nearest3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_nearest3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_nearest3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void atg_upsample_nearest3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null); +void 
atg_upsample_trilinear3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null);
+void atg_upsample_trilinear3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null);
+void atg_upsample_trilinear3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null);
+void atg_upsample_trilinear3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners, double scales_d_v, uint8_t scales_d_null, double scales_h_v, uint8_t scales_h_null, double scales_w_v, uint8_t scales_w_null);
+void atg_value_selecting_reduction_backward(tensor *, tensor grad, int64_t dim, tensor indices, int64_t *sizes_data, int sizes_len, int keepdim);
 void atg_values(tensor *, tensor self);
+void atg_vander(tensor *, tensor x, int64_t n_v, uint8_t n_null, int increasing);
 void atg_var(tensor *, tensor self, int unbiased);
 void atg_var1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
 void atg_var_mean(tensor *, tensor self, int unbiased);
 void atg_var_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
 void atg_var_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+void atg_vdot(tensor *, tensor self, tensor other);
+void atg_vdot_out(tensor *, tensor out, tensor self, tensor other);
 void atg_view(tensor *, tensor self, int64_t *size_data, int size_len);
 void atg_view_as(tensor *, tensor self, tensor other);
+void atg_view_as_complex(tensor *, tensor self);
+void atg_view_as_real(tensor *, tensor self);
+void atg_vstack(tensor *, tensor *tensors_data, int tensors_len);
+void atg_vstack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len);
 tensor *atg_where(tensor condition);
 void atg_where1(tensor *, tensor condition, tensor self, tensor other);
+void atg_where2(tensor *, tensor condition, scalar self_scalar, tensor other);
+void atg_where3(tensor *, tensor condition, tensor self, scalar other);
+void atg_where4(tensor *, tensor condition, scalar self_scalar, scalar other);
 void atg_zero_(tensor *, tensor self);
 void atg_zeros(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
 void atg_zeros_like(tensor *, tensor self);
diff --git a/nn/sequential.go b/nn/sequential.go
index 4089459..f89ad32 100644
--- a/nn/sequential.go
+++ b/nn/sequential.go
@@ -254,6 +254,40 @@ func BatchAccuracyForLogits(vs *VarStore, m ts.ModuleT, xs, ys *ts.Tensor, d got
     return sumAccuracy / sampleCount
 }
 
+func BatchAccuracyForLogitsOld(vs *VarStore, m ts.ModuleT, xs, ys *ts.Tensor, d gotch.Device, batchSize int) (retVal float64) {
+
+    var (
+        sumAccuracy float64 = 0.0
+        sampleCount float64 = 0.0
+    )
+
+    vs.Freeze()
+    defer vs.Unfreeze()
+
+    iter2 := ts.MustNewIter2(xs, ys, int64(batchSize))
+    for {
+        item, ok := iter2.Next()
+        if !ok {
+            break
+        }
+
+        size := float64(item.Data.MustSize()[0])
+        bImages := item.Data.MustTo(d, true)
+        bLabels := item.Label.MustTo(d, true)
+
+        logits := m.ForwardT(bImages, false)
+        acc := logits.AccuracyForLogits(bLabels)
+        sumAccuracy += acc.Float64Values()[0] * size
+        sampleCount += size
+
+        bImages.MustDrop()
+        bLabels.MustDrop()
+        acc.MustDrop()
+    }
+
+    return sumAccuracy / sampleCount
+}
+
 // BatchAccuracyForLogitIdx is an alternative of BatchAccuracyForLogits to
 // calculate accuracy for specified batch on module weight. It uses tensor
 // indexing instead of Iter2
diff --git a/setup-cpu.sh b/setup-cpu.sh
index 101dcfe..a3866b7 100755
--- a/setup-cpu.sh
+++ b/setup-cpu.sh
@@ -1,8 +1,8 @@
 #!/bin/bash
 
 # Env
-GOTCH_VERSION="${GOTCH_VER:-v0.2.0}"
-LIBTORCH_VERSION="${LIBTORCH_VER:-1.5.1}"
+GOTCH_VERSION="${GOTCH_VER:-v0.3.0}"
+LIBTORCH_VERSION="${LIBTORCH_VER:-1.7.0}"
 
 GOTCH="$GOPATH/pkg/mod/github.com/sugarme/gotch@$GOTCH_VERSION"
 LIBTORCH="$GOPATH/pkg/mod/github.com/sugarme/gotch@$GOTCH_VERSION/libtch/libtorch"
diff --git a/setup-gpu.sh b/setup-gpu.sh
index 72537ee..2355db3 100755
--- a/setup-gpu.sh
+++ b/setup-gpu.sh
@@ -1,7 +1,7 @@
 #!/bin/bash
 
-GOTCH_VERSION="${GOTCH_VER:-v0.2.0}"
-LIBTORCH_VERSION="${LIBTORCH_VER:-1.5.1}"
+GOTCH_VERSION="${GOTCH_VER:-v0.3.0}"
+LIBTORCH_VERSION="${LIBTORCH_VER:-1.7.0}"
 CUDA_VERSION="${CUDA_VER:-10.1}"
 CU_VERSION="${CUDA_VERSION//./}"
diff --git a/setup.sh b/setup.sh
new file mode 100755
index 0000000..97db2b7
--- /dev/null
+++ b/setup.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+export GOTCH_VERSION="${GOTCH_VER:-v0.3.0}"
+export LIBTORCH_VERSION="${LIBTORCH_VER:-1.7.0}"
+export CUDA_VERSION="${CUDA_VER:-10.1}"
+export CU_VERSION="${CUDA_VERSION//./}"
+
+export GOTCH="$HOME/projects/sugarme/gotch"
+export LIBTORCH="$HOME/projects/sugarme/gotch/libtch/libtorch"
+export LIBRARY_PATH="$LIBTORCH/lib"
+export CPATH="$LIBTORCH/lib:$LIBTORCH/include:$LIBTORCH/include/torch/csrc/api/include"
+export LD_LIBRARY_PATH="$LIBTORCH/lib:/usr/lib64-nvidia:/usr/local/cuda-${CUDA_VERSION}/lib64"
+
+sudo rm -rf $LIBTORCH
+sudo mkdir -p $LIBTORCH
+
+wget -O /tmp/libtorch-cxx11-abi-shared-with-deps-${LIBTORCH_VERSION}%2Bcu${CU_VERSION}.zip https://download.pytorch.org/libtorch/cu${CU_VERSION}/libtorch-cxx11-abi-shared-with-deps-${LIBTORCH_VERSION}%2Bcu${CU_VERSION}.zip
+sudo unzip /tmp/libtorch-cxx11-abi-shared-with-deps-${LIBTORCH_VERSION}%2Bcu${CU_VERSION}.zip -d $GOTCH/libtch
diff --git a/tensor/must-tensor-generated.go b/tensor/must-tensor-generated.go
index 1bfcefe..e5d0c74 100644
--- a/tensor/must-tensor-generated.go
+++ b/tensor/must-tensor-generated.go
@@ -2,10050 +2,10178 @@ package tensor
 
 // NOTE. THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND!
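The regenerated `Must*` wrappers in the hunk below all follow one pattern: each `MustXxx` method calls the corresponding error-returning `Xxx` method and aborts with `log.Fatal` on failure, so callers trade explicit error handling for brevity. A minimal sketch of that pattern, using `MustAbs` purely as an illustration (it assumes the surrounding `tensor` package, its `Tensor` type, and the `"log"` import; it is not the full generated file):

```go
// Sketch of the generated wrapper pattern in must-tensor-generated.go.
// Each MustXxx delegates to the error-returning Xxx and exits on failure.
func (ts *Tensor) MustAbs(del bool) *Tensor {
    retVal, err := ts.Abs(del) // underlying generated API returns (value, error)
    if err != nil {
        log.Fatal(err) // Must* variants abort instead of propagating the error
    }
    return retVal
}
```

The `del` flag is passed through unchanged; its meaning is defined by the underlying generated API rather than by the wrapper itself.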
-import ( - "log" +import( + "log" - "github.com/sugarme/gotch" + "github.com/sugarme/gotch" ) -func (ts *Tensor) Must__And_(other *Scalar) { - err := ts.__And_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__And1(other *Tensor) { - - err := ts.__And1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Iand_(other *Scalar) { - - err := ts.__Iand_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Iand1(other *Tensor) { - - err := ts.__Iand1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Ilshift_(other *Scalar) { - - err := ts.__Ilshift_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Ilshift1(other *Tensor) { - - err := ts.__Ilshift1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Ior_(other *Scalar) { - - err := ts.__Ior_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Ior1(other *Tensor) { - - err := ts.__Ior1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Irshift_(other *Scalar) { - - err := ts.__Irshift_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Irshift1(other *Tensor) { - - err := ts.__Irshift1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Ixor_(other *Scalar) { - - err := ts.__Ixor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Ixor1(other *Tensor) { - - err := ts.__Ixor1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Lshift_(other *Scalar) { - - err := ts.__Lshift_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Lshift1(other *Tensor) { - - err := ts.__Lshift1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Or_(other *Scalar) { - - err := ts.__Or_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Or1(other *Tensor) { - - err := ts.__Or1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Rshift_(other *Scalar) { - - err := ts.__Rshift_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Rshift1(other *Tensor) { - - err := ts.__Rshift1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Xor_(other *Scalar) { - - err := ts.__Xor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must__Xor1(other *Tensor) { - - err := ts.__Xor1(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._AdaptiveAvgPool2d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._Addr(vec1, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Addr_(vec1 *Tensor, vec2 *Tensor) { - - err := ts._Addr_(vec1, vec2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) 
Must_AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._AddrOut(out, vec1, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_AmpUpdateScale(growthTracker *Tensor, currentScale *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) (retVal *Tensor) { - - retVal, err := _AmpUpdateScale(growthTracker, currentScale, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor) { - - err := ts._BaddbmmMkl_(batch1, batch2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_CastByte(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastByte(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastChar(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastChar(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastDouble(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastDouble(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastFloat(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastFloat(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastHalf(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastHalf(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastInt(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastInt(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastLong(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastLong(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CastShort(nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CastShort(nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_Cat(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _Cat(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _CatOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor) (retVal *Tensor) { - - retVal, err := _CdistBackward(grad, x1, x2, p, cdist) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CholeskyHelper(upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CholeskyHelper(upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CholeskySolveHelper(a *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CholeskySolveHelper(a, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Coalesced_(coalesced bool) { - - err := ts._Coalesced_(coalesced) - if err != nil { - log.Fatal(err) - } - - return -} - -func Must_Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, 
outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool) (retVal *Tensor) { - - retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64) (retVal *Tensor) { - - retVal, err := _ConvolutionNogroup(input, weight, bias, stride, padding, dilation, transposed, outputPadding) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CopyFrom(dst *Tensor, nonBlocking bool, del bool) (retVal *Tensor) { - - retVal, err := ts._CopyFrom(dst, nonBlocking, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool) (retVal *Tensor) { - - retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional bool) (retVal *Tensor) { - - retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, numLayers, batchFirst, bidirectional) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Cumprod(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._Cumprod(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CumprodOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._CumprodOut(out, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Cumsum(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._Cumsum(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_CumsumOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._CumsumOut(out, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_DimArange(like *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _DimArange(like, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor) (retVal *Tensor) { - - retVal, err := _DirichletGrad(x, alpha, total) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor) (retVal *Tensor) { - - retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights) - if err 
!= nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor) (retVal *Tensor) { - - retVal, err := _EmbeddingBagDenseBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64) (retVal *Tensor) { - - retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor) (retVal *Tensor) { - - retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64) (retVal *Tensor) { - - retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._FftWithSize(signalNdim, complexInput, complexOutput, inverse, checkedSignalSizes, normalized, onesided, outputSizes, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._GatherSparseBackward(dim, index, grad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_IndexCopy_(dim int64, index *Tensor, source *Tensor) { - - err := ts._IndexCopy_(dim, index, source) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_IndexPutImpl_(indices []Tensor, values *Tensor, accumulate bool, unsafety bool) { - - err := ts._IndexPutImpl_(indices, values, accumulate, unsafety) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_Indices(del bool) (retVal *Tensor) { - - retVal, err := ts._Indices(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_InverseHelper(del bool) (retVal *Tensor) { - - retVal, err := ts._InverseHelper(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._LogSoftmax(dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func 
(ts *Tensor) Must_LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._LogSoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_LuSolveHelper(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._LuSolveHelper(lUData, lUPivots, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MaskedScale(mask *Tensor, scale float64, del bool) (retVal *Tensor) { - - retVal, err := ts._MaskedScale(mask, scale, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MkldnnReshape(shape []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MkldnnReshape(shape, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor) { - - retVal, err := ts._MkldnnTranspose(dim0, dim1, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64) { - - err := ts._MkldnnTranspose_(dim0, dim1) - if err != nil { - log.Fatal(err) - } - - return -} - -func Must_MultinomialAliasDraw(j *Tensor, q *Tensor, numSamples int64) (retVal *Tensor) { - - retVal, err := _MultinomialAliasDraw(j, q, numSamples) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_NnpackSpatialConvolutionBackwardInput(input *Tensor, gradOutput *Tensor, weight *Tensor, padding []int64) (retVal *Tensor) { - - retVal, err := _NnpackSpatialConvolutionBackwardInput(input, gradOutput, weight, padding) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize []int64, gradOutput *Tensor, padding []int64) (retVal *Tensor) { - - retVal, err := _NnpackSpatialConvolutionBackwardWeight(input, weightsize, gradOutput, padding) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool) (retVal *Tensor) { - - retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._PdistBackward(grad, p, pdist, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ReshapeFromTensor(shape *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._ReshapeFromTensor(shape, del) - if err != nil { - log.Fatal(err) - } - - 
return retVal -} - -func (ts *Tensor) Must_SWhere(condition *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._SWhere(condition, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SampleDirichlet(del bool) (retVal *Tensor) { - - retVal, err := ts._SampleDirichlet(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_ShapeAsTensor(del bool) (retVal *Tensor) { - - retVal, err := ts._ShapeAsTensor(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64) { - - err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_SobolEngineInitializeState_(dimension int64) { - - err := ts._SobolEngineInitializeState_(dimension) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_SobolEngineScramble_(ltm *Tensor, dimension int64) { - - err := ts._SobolEngineScramble_(ltm, dimension) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool) (retVal *Tensor) { - - retVal, err := ts._Softmax(dim, halfToFloat, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SoftmaxBackwardData(gradOutput, output, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseAddmm(sparse *Tensor, dense *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseAddmm(sparse, dense, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor) { - - retVal, err := _SparseMm(sparse, dense) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSum(del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSum(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSum1(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSum1(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSum2(dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSum2(dim, del) - if err != nil { - log.Fatal(err) - } - - return 
retVal -} - -func (ts *Tensor) Must_SparseSum3(dim []int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSum3(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_SparseSumBackward(grad *Tensor, dim []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._SparseSumBackward(grad, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_StandardGamma(del bool) (retVal *Tensor) { - - retVal, err := ts._StandardGamma(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_StandardGammaGrad(output *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts._StandardGammaGrad(output, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Std(unbiased bool, del bool) (retVal *Tensor) { - - retVal, err := ts._Std(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64) (retVal *Tensor) { - - retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_UnsafeView(size []int64, del bool) (retVal *Tensor) { - - retVal, err := ts._UnsafeView(size, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Values(del bool) (retVal *Tensor) { - - retVal, err := ts._Values(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) Must_Var(unbiased bool, del bool) (retVal *Tensor) { - - retVal, err := ts._Var(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func Must_WeightNorm(v *Tensor, g *Tensor, dim int64) (retVal *Tensor) { - - retVal, err := _WeightNorm(v, g, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAbs(del bool) (retVal *Tensor) { - - retVal, err := ts.Abs(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAbs_() { - - err := ts.Abs_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAbsOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AbsOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAcos(del bool) (retVal *Tensor) { - - retVal, err := ts.Acos(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAcos_() { - - err := ts.Acos_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAcosOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AcosOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool1d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool2d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) 
MustAdaptiveAvgPool3d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool3d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dBackward(gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dBackwardOut(gradInput, gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool2dBackwardOut(gradInput, gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdaptiveMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AdaptiveMaxPool3dBackwardOut(gradInput, gradOutput, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdd(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Add(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdd1(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Add1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAdd_(other *Tensor) { - - err := ts.Add_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAdd1_(other *Scalar) { - - err := ts.Add1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addbmm(batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddbmm_(batch1 *Tensor, batch2 *Tensor) { - - err := ts.Addbmm_(batch1, batch2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddbmmOut(out, batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addcdiv(tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - 
- return retVal -} - -func (ts *Tensor) MustAddcdiv_(tensor1 *Tensor, tensor2 *Tensor) { - - err := ts.Addcdiv_(tensor1, tensor2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddcmul(tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addcmul(tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddcmul_(tensor1 *Tensor, tensor2 *Tensor) { - - err := ts.Addcmul_(tensor1, tensor2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addmm(mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmm_(mat1 *Tensor, mat2 *Tensor) { - - err := ts.Addmm_(mat1, mat2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddmmOut(out, mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmv(mat *Tensor, vec *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addmv(mat, vec, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddmv_(mat *Tensor, vec *Tensor) { - - err := ts.Addmv_(mat, vec) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddmvOut(out, mat, vec, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Addr(vec1, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAddr_(vec1 *Tensor, vec2 *Tensor) { - - err := ts.Addr_(vec1, vec2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AddrOut(out, vec1, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustAffineGridGenerator(theta *Tensor, size []int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := AffineGridGenerator(theta, size, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustAffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAlias(del bool) (retVal *Tensor) { - - retVal, err := ts.Alias(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAlignAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AlignAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAll(del bool) (retVal *Tensor) { - - retVal, err := 
ts.All(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAll1(dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.All1(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AllOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := AlphaDropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAlphaDropout_(p float64, train bool) { - - err := ts.AlphaDropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAngle(del bool) (retVal *Tensor) { - - retVal, err := ts.Angle(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAngleOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AngleOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAny(del bool) (retVal *Tensor) { - - retVal, err := ts.Any(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAny1(dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Any1(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAnyOut(out *Tensor, dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AnyOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Arange(end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArange1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Arange1(start, end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArange2(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Arange2(start, end, step, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArangeOut(out *Tensor, end *Scalar) (retVal *Tensor) { - - retVal, err := ArangeOut(out, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustArangeOut1(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor) { - - retVal, err := ArangeOut1(out, start, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgmax(dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Argmax(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgmin(dim int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Argmin(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustArgsort(dim int64, descending bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Argsort(dim, descending, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAsStrided(size []int64, stride []int64, storageOffset int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AsStrided(size, stride, 
storageOffset, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset int64) { - - err := ts.AsStrided_(size, stride, storageOffset) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAsin(del bool) (retVal *Tensor) { - - retVal, err := ts.Asin(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAsin_() { - - err := ts.Asin_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAsinOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AsinOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan(del bool) (retVal *Tensor) { - - retVal, err := ts.Atan(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan2(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Atan2(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan2_(other *Tensor) { - - err := ts.Atan2_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAtan2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Atan2Out(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAtan_() { - - err := ts.Atan_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustAtanOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.AtanOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3d(kernelSize 
[]int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustAvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor) { - - retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBaddbmm(batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Baddbmm(batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBaddbmm_(batch1 *Tensor, batch2 *Tensor) { - - err := ts.Baddbmm_(batch1, batch2) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BaddbmmOut(out, batch1, batch2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BartlettWindow1(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor) { - - retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor) (retVal *Tensor) { - - retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor) { - - retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func MustBatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64) (retVal *Tensor) { - - retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBernoulli(del bool) (retVal *Tensor) { - - retVal, err := ts.Bernoulli(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBernoulli1(p float64, del bool) (retVal *Tensor) { - - retVal, err := ts.Bernoulli1(p, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBernoulli_(p *Tensor) { - - err := ts.Bernoulli_(p) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBernoulli1_(p float64) { - - err := ts.Bernoulli1_(p) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBernoulliOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BernoulliOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := Bilinear(input1, input2, weight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyBackwardOut(gradInput, gradOutput, target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyOut(out, target, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBincount(weights *Tensor, minlength int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Bincount(weights, minlength, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseAnd(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAnd(other, del) - if err != nil { - log.Fatal(err) - } - - 
return retVal -} - -func (ts *Tensor) MustBitwiseAnd1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAnd1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseAnd_(other *Scalar) { - - err := ts.BitwiseAnd_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseAnd1_(other *Tensor) { - - err := ts.BitwiseAnd1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAndOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseAndOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseAndOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseNot(del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseNot(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseNot_() { - - err := ts.BitwiseNot_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseNotOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseNotOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOr(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseOr(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOr1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseOr1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOr_(other *Scalar) { - - err := ts.BitwiseOr_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseOr1_(other *Tensor) { - - err := ts.BitwiseOr1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseOrOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseOrOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseOrOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXor(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXor1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXor1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXor_(other *Scalar) { - - err := ts.BitwiseXor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseXor1_(other *Tensor) { - - err := ts.BitwiseXor1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustBitwiseXorOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBitwiseXorOut1(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.BitwiseXorOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBlackmanWindow(windowLength 
int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustBlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := BlackmanWindow1(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBmm(mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Bmm(mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustBmmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.BmmOut(out, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCartesianProd(tensors []Tensor) (retVal *Tensor) { - - retVal, err := CartesianProd(tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCat(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := Cat(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCatOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := CatOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCauchy_(median float64, sigma float64) { - - err := ts.Cauchy_(median, sigma) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustCdist(x1 *Tensor, x2 *Tensor, p float64, computeMode int64) (retVal *Tensor) { - - retVal, err := Cdist(x1, x2, p, computeMode) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCeil(del bool) (retVal *Tensor) { - - retVal, err := ts.Ceil(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCeil_() { - - err := ts.Ceil_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCeilOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CeilOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCelu(del bool) (retVal *Tensor) { - - retVal, err := ts.Celu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCelu_() { - - err := ts.Celu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustChainMatmul(matrices []Tensor) (retVal *Tensor) { - - retVal, err := ChainMatmul(matrices) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholesky(upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Cholesky(upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskyInverse(upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskyInverse(upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskyInverseOut(out *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskyInverseOut(out, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskyOut(out *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskyOut(out, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskySolve(input2 *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskySolve(input2, upper, del) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CholeskySolveOut(out, input2, upper, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClamp(min *Scalar, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Clamp(min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClamp_(min *Scalar, max *Scalar) { - - err := ts.Clamp_(min, max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMax(max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMax(max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMax_(max *Scalar) { - - err := ts.ClampMax_(max) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMaxOut(out *Tensor, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMaxOut(out, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMin(min *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMin(min, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampMin_(min *Scalar) { - - err := ts.ClampMin_(min) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustClampMinOut(out *Tensor, min *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampMinOut(out, min, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustClampOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ClampOut(out, min, max, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCoalesce(del bool) (retVal *Tensor) { - - retVal, err := ts.Coalesce(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCol2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCol2imBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := Col2imBackwardOut(gradInput, gradOutput, kernelSize, dilation, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCol2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCombinations(r int64, withReplacement bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Combinations(r, withReplacement, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConj(del bool) (retVal *Tensor) { - - retVal, err := ts.Conj(del) - if err != nil { - 
log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConjOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ConjOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConstantPadNd(pad []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ConstantPadNd(pad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustContiguous(del bool) (retVal *Tensor) { - - retVal, err := ts.Contiguous(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64) (retVal *Tensor) { - - retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ConvTbc(weight, bias, pad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { - - retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { - - retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64) (retVal *Tensor) { - - retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor) { - - retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64) (retVal *Tensor) { - - retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCopySparseToSparse_(src *Tensor, nonBlocking bool) { - - err := 
ts.CopySparseToSparse_(src, nonBlocking) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCos(del bool) (retVal *Tensor) { - - retVal, err := ts.Cos(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCos_() { - - err := ts.Cos_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCosOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CosOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCosh(del bool) (retVal *Tensor) { - - retVal, err := ts.Cosh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCosh_() { - - err := ts.Cosh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustCoshOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CoshOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor) { - - retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64) (retVal *Tensor) { - - retVal, err := CosineSimilarity(x1, x2, dim, eps) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCross(other *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Cross(other, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCrossOut(out *Tensor, other *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.CrossOut(out, other, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor) { - - retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCtcLoss1(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool) (retVal *Tensor) { - - retVal, err := CtcLoss1(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor) { - - retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64) (retVal *Tensor) { - - retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolution1(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal 
*Tensor) { - - retVal, err := ts.CudnnConvolution1(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { - - retVal, err := CudnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionTranspose1(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionTranspose1(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustCudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { - - retVal, err := CudnnConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.CudnnGridSampler(grid, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Cumprod(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.CumprodOut(out, dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Cumsum(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return 
retVal -} - -func (ts *Tensor) MustCumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.CumsumOut(out, dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustData(del bool) (retVal *Tensor) { - - retVal, err := ts.Data(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDequantize(del bool) (retVal *Tensor) { - - retVal, err := ts.Dequantize(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDet(del bool) (retVal *Tensor) { - - retVal, err := ts.Det(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDetach(del bool) (retVal *Tensor) { - - retVal, err := ts.Detach(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDetach_() { - - err := ts.Detach_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDiag(diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Diag(diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.DiagEmbed(offset, dim1, dim2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.DiagOut(out, diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagflat(offset int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Diagflat(offset, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Diagonal(offset, dim1, dim2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDigamma(del bool) (retVal *Tensor) { - - retVal, err := ts.Digamma(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDigamma_() { - - err := ts.Digamma_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDigammaOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DigammaOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDist(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Dist(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiv(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Div(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiv1(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Div1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDiv_(other *Tensor) { - - err := ts.Div_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDiv1_(other *Scalar) { - - err := ts.Div1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustDivOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DivOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDot(tensor *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Dot(tensor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - 
-func (ts *Tensor) MustDotOut(out *Tensor, tensor *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.DotOut(out, tensor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := Dropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustDropout_(p float64, train bool) { - - err := ts.Dropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustEinsum(equation string, tensors []Tensor) (retVal *Tensor) { - - retVal, err := Einsum(equation, tensors) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustElu(del bool) (retVal *Tensor) { - - retVal, err := ts.Elu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustElu_() { - - err := ts.Elu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustEluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor) (retVal *Tensor) { - - retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEluBackwardOut(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor) (retVal *Tensor) { - - retVal, err := EluBackwardOut(gradInput, gradOutput, alpha, scale, inputScale, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEluOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.EluOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmbedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor) { - - retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool) (retVal *Tensor) { - - retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor) { - - retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64) { - - err := ts.EmbeddingRenorm_(indices, maxNorm, normType) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustEmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool) (retVal *Tensor) { - - retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Empty(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEmptyLike(del bool) (retVal *Tensor) { - - retVal, err := ts.EmptyLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmptyOut(out *Tensor, size []int64) (retVal *Tensor) { 
- - retVal, err := EmptyOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEq(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Eq(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEq1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Eq1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEq_(other *Scalar) { - - err := ts.Eq_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustEq1_(other *Tensor) { - - err := ts.Eq1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustEqOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.EqOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustEqOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.EqOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErf(del bool) (retVal *Tensor) { - - retVal, err := ts.Erf(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErf_() { - - err := ts.Erf_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustErfOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ErfOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfc(del bool) (retVal *Tensor) { - - retVal, err := ts.Erfc(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfc_() { - - err := ts.Erfc_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustErfcOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ErfcOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfinv(del bool) (retVal *Tensor) { - - retVal, err := ts.Erfinv(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustErfinv_() { - - err := ts.Erfinv_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustErfinvOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ErfinvOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExp(del bool) (retVal *Tensor) { - - retVal, err := ts.Exp(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExp_() { - - err := ts.Exp_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustExpOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ExpOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpand(size []int64, implicit bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Expand(size, implicit, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpandAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ExpandAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpm1(del bool) (retVal *Tensor) { - - retVal, err := 
ts.Expm1(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExpm1_() { - - err := ts.Expm1_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustExpm1Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Expm1Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustExponential_(lambd float64) { - - err := ts.Exponential_(lambd) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Eye(n, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEye1(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Eye1(n, m, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEyeOut(out *Tensor, n int64) (retVal *Tensor) { - - retVal, err := EyeOut(out, n) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustEyeOut1(out *Tensor, n int64, m int64) (retVal *Tensor) { - - retVal, err := EyeOut1(out, n, m) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FakeQuantizePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFakeQuantizePerTensorAffineBackward(grad *Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor) { - - retVal, err := ts.FakeQuantizePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale 
*Scalar, weightZeroPoint *Scalar, bias *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmPackGemmMatrixFp16(input *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmPackGemmMatrixFp16(input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmPackQuantizedMatrix(input *Tensor) (retVal *Tensor) { - - retVal, err := FbgemmPackQuantizedMatrix(input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFbgemmPackQuantizedMatrix1(input *Tensor, k int64, n int64) (retVal *Tensor) { - - retVal, err := FbgemmPackQuantizedMatrix1(input, k, n) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFeatureAlphaDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := FeatureAlphaDropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFeatureAlphaDropout_(p float64, train bool) { - - err := ts.FeatureAlphaDropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustFeatureDropout(input *Tensor, p float64, train bool) (retVal *Tensor) { - - retVal, err := FeatureDropout(input, p, train) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFeatureDropout_(p float64, train bool) { - - err := ts.FeatureDropout_(p, train) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFft(signalNdim int64, normalized bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Fft(signalNdim, normalized, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFill_(value *Scalar) { - - err := ts.Fill_(value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFill1_(value *Tensor) { - - err := ts.Fill1_(value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFillDiagonal_(fillValue *Scalar, wrap bool) { - - err := ts.FillDiagonal_(fillValue, wrap) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFlatten(startDim int64, endDim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Flatten(startDim, endDim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFlip(dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Flip(dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloor(del bool) (retVal *Tensor) { - - retVal, err := ts.Floor(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloor_() { - - err := ts.Floor_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFloorDivide(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FloorDivide(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloorDivide1(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.FloorDivide1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloorDivide_(other *Tensor) { - - err := ts.FloorDivide_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFloorDivide1_(other *Scalar) { - - err := ts.FloorDivide1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFloorDivideOut(out *Tensor, other 
*Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FloorDivideOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFloorOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FloorOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFmod(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Fmod(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFmod1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Fmod1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFmod_(other *Scalar) { - - err := ts.Fmod_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFmod1_(other *Tensor) { - - err := ts.Fmod1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFmodOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.FmodOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFmodOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FmodOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFrac(del bool) (retVal *Tensor) { - - retVal, err := ts.Frac(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFrac_() { - - err := ts.Frac_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustFracOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FracOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFractionalMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FractionalMaxPool2dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFractionalMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.FractionalMaxPool3dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFrobeniusNorm(del bool) (retVal *Tensor) { - - retVal, err := ts.FrobeniusNorm(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFrobeniusNorm1(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.FrobeniusNorm1(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFrobeniusNormOut(out *Tensor, dim 
[]int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Full(size, fillValue, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustFullLike(fillValue *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.FullLike(fillValue, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustFullOut(out *Tensor, size []int64, fillValue *Scalar) (retVal *Tensor) { - - retVal, err := FullOut(out, size, fillValue) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGather(dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Gather(dim, index, sparseGrad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor) { - - retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGe(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Ge(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGe1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Ge1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGe_(other *Scalar) { - - err := ts.Ge_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustGe1_(other *Tensor) { - - err := ts.Ge1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustGeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.GeOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.GeOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGelu(del bool) (retVal *Tensor) { - - retVal, err := ts.Gelu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGeluBackward(grad *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.GeluBackward(grad, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGeometric_(p float64) { - - err := ts.Geometric_(p) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustGer(vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Ger(vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGerOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.GerOut(out, vec2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGlu(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Glu(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) 
MustGluBackward(gradOutput *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.GluBackward(gradOutput, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGluBackwardOut(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.GluBackwardOut(gradInput, gradOutput, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGluOut(out *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.GluOut(out, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGrad(del bool) (retVal *Tensor) { - - retVal, err := ts.Grad(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustGridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustGridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustGridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor) { - - retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustGroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool) (retVal *Tensor) { - - retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { - - retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGt(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Gt(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGt1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Gt1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGt_(other *Scalar) { - - err := ts.Gt_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustGt1_(other *Tensor) { - - err := ts.Gt1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustGtOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.GtOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustGtOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.GtOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := 
HammingWindow1(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := HammingWindow2(windowLength, periodic, alpha, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := HammingWindow3(windowLength, periodic, alpha, beta, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := HannWindow(windowLength, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := HannWindow1(windowLength, periodic, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardshrink(del bool) (retVal *Tensor) { - - retVal, err := ts.Hardshrink(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.HardshrinkBackward(gradOut, lambd, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardsigmoid(del bool) (retVal *Tensor) { - - retVal, err := ts.Hardsigmoid(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardsigmoid_() { - - err := ts.Hardsigmoid_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustHardsigmoidBackward(gradOutput *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.HardsigmoidBackward(gradOutput, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardsigmoidOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.HardsigmoidOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardtanh(del bool) (retVal *Tensor) { - - retVal, err := ts.Hardtanh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardtanh_() { - - err := ts.Hardtanh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustHardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardtanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.HardtanhBackwardOut(gradInput, gradOutput, minVal, maxVal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHardtanhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.HardtanhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del) 
- if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHistc(bins int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Histc(bins, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool) (retVal *Tensor) { - - retVal, err := ts.HistcOut(out, bins, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHspmm(mat1 *Tensor, mat2 *Tensor) (retVal *Tensor) { - - retVal, err := Hspmm(mat1, mat2) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustHspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor) (retVal *Tensor) { - - retVal, err := HspmmOut(out, mat1, mat2) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIfft(signalNdim int64, normalized bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Ifft(signalNdim, normalized, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustIm2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustIm2colBackwardOut(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64) (retVal *Tensor) { - - retVal, err := Im2colBackwardOut(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIm2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustImag(del bool) (retVal *Tensor) { - - retVal, err := ts.Imag(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndex(indices []Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Index(indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.IndexAdd(dim, index, source, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndexAdd_(dim int64, index *Tensor, source *Tensor) { - - err := ts.IndexAdd_(dim, index, source) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustIndexCopy(dim int64, index *Tensor, source *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.IndexCopy(dim, index, source, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndexCopy_(dim int64, index *Tensor, source *Tensor) { - - err := ts.IndexCopy_(dim, index, source) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustIndexFill(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.IndexFill(dim, index, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - 
-func (ts *Tensor) MustIndexFill1(dim int64, index *Tensor, value *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.IndexFill1(dim, index, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndexFill_(dim int64, index *Tensor, value *Scalar) { - - err := ts.IndexFill_(dim, index, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustIndexFill1_(dim int64, index *Tensor, value *Tensor) { - - err := ts.IndexFill1_(dim, index, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustIndexPut(indices []Tensor, values *Tensor, accumulate bool, del bool) (retVal *Tensor) { - - retVal, err := ts.IndexPut(indices, values, accumulate, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndexPut_(indices []Tensor, values *Tensor, accumulate bool) { - - err := ts.IndexPut_(indices, values, accumulate) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.IndexSelect(dim, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.IndexSelectOut(out, dim, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIndices(del bool) (retVal *Tensor) { - - retVal, err := ts.Indices(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustInstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor) { - - retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIntRepr(del bool) (retVal *Tensor) { - - retVal, err := ts.IntRepr(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustInverse(del bool) (retVal *Tensor) { - - retVal, err := ts.Inverse(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustInverseOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.InverseOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIrfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Irfft(signalNdim, normalized, onesided, signalSizes, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIsclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Isclose(other, rtol, atol, equalNan, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIsfinite(del bool) (retVal *Tensor) { - - retVal, err := ts.Isfinite(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIsinf(del bool) (retVal *Tensor) { - - retVal, err := ts.Isinf(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustIsnan(del bool) (retVal *Tensor) { - - retVal, err := ts.Isnan(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustKlDiv(target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - 
retVal, err := ts.KlDiv(target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustKlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.KlDivBackward(gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustL1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.L1Loss(target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.L1LossBackwardOut(gradInput, gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.L1LossOut(out, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool) (retVal *Tensor) { - - retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLe(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Le(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLe1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Le1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLe_(other *Scalar) { - - err := ts.Le_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLe1_(other *Tensor) { - - err := ts.Le1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.LeOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLeOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LeOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLeakyRelu(del bool) (retVal *Tensor) { - - retVal, err := ts.LeakyRelu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLeakyRelu_() { - - err := ts.LeakyRelu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool) (retVal *Tensor) { - - retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLeakyReluOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LeakyReluOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLerp(end *Tensor, weight *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Lerp(end, weight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - 
-func (ts *Tensor) MustLerp1(end *Tensor, weight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Lerp1(end, weight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLerp_(end *Tensor, weight *Scalar) { - - err := ts.Lerp_(end, weight) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLerp1_(end *Tensor, weight *Tensor) { - - err := ts.Lerp1_(end, weight) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLerpOut(out *Tensor, end *Tensor, weight *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.LerpOut(out, end, weight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLerpOut1(out *Tensor, end *Tensor, weight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LerpOut1(out, end, weight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLgamma(del bool) (retVal *Tensor) { - - retVal, err := ts.Lgamma(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLgamma_() { - - err := ts.Lgamma_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLgammaOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LgammaOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := Linear(input, weight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64) (retVal *Tensor) { - - retVal, err := LinspaceOut(out, start, end, steps) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog(del bool) (retVal *Tensor) { - - retVal, err := ts.Log(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog10(del bool) (retVal *Tensor) { - - retVal, err := ts.Log10(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog10_() { - - err := ts.Log10_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLog10Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Log10Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog1p(del bool) (retVal *Tensor) { - - retVal, err := ts.Log1p(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog1p_() { - - err := ts.Log1p_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLog1pOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Log1pOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog2(del bool) (retVal *Tensor) { - - retVal, err := ts.Log2(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog2_() { - - err := ts.Log2_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLog2Out(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Log2Out(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLog_() { - - 
err := ts.Log_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogNormal_(mean float64, std float64) { - - err := ts.LogNormal_(mean, std) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoid(del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoid(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoidBackwardOut(gradInput, gradOutput, buffer, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSigmoidOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSigmoidOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.LogSoftmax(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogdet(del bool) (retVal *Tensor) { - - retVal, err := ts.Logdet(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalAnd(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalAnd(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalAnd_(other *Tensor) { - - err := ts.LogicalAnd_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalAndOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalAndOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalNot(del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalNot(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalNot_() { - - err := ts.LogicalNot_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalNotOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalNotOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalOr(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalOr(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalOr_(other *Tensor) { - - err := ts.LogicalOr_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalOrOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalOrOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalXor(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LogicalXor(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogicalXor_(other *Tensor) { - - err := ts.LogicalXor_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLogicalXorOut(out *Tensor, other *Tensor, del bool) 
(retVal *Tensor) { - - retVal, err := ts.LogicalXorOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLogspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustLogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64) (retVal *Tensor) { - - retVal, err := LogspaceOut(out, start, end, steps, base) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Logsumexp(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.LogsumexpOut(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLt(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Lt(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLt1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Lt1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLt_(other *Scalar) { - - err := ts.Lt_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLt1_(other *Tensor) { - - err := ts.Lt1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustLtOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.LtOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLtOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LtOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLuSolve(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LuSolve(lUData, lUPivots, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustLuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustMarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64) (retVal *Tensor) { - - retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaskedFill(mask *Tensor, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.MaskedFill(mask, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaskedFill1(mask *Tensor, value *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaskedFill1(mask, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaskedFill_(mask *Tensor, value *Scalar) { - - err := ts.MaskedFill_(mask, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustMaskedFill1_(mask *Tensor, value *Tensor) { - - err := ts.MaskedFill1_(mask, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustMaskedScatter(mask 
*Tensor, source *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaskedScatter(mask, source, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaskedScatter_(mask *Tensor, source *Tensor) { - - err := ts.MaskedScatter_(mask, source) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustMaskedSelect(mask *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaskedSelect(mask, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaskedSelectOut(out *Tensor, mask *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaskedSelectOut(out, mask, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMatmul(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Matmul(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MatmulOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMatrixPower(n int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MatrixPower(n, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMatrixRank(symmetric bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MatrixRank(symmetric, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMatrixRank1(tol float64, symmetric bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MatrixRank1(tol, symmetric, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMax(del bool) (retVal *Tensor) { - - retVal, err := ts.Max(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMax1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Max1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxPool2dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxPool2dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, 
del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxPool3dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxPool3dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool2d(indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool2d(indices, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool2dBackward(gradOutput, indices, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool2dBackwardOut(gradInput, gradOutput, indices, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool3d(indices, outputSize, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool3dBackward(gradOutput, indices, outputSize, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool3dBackwardOut(gradInput, gradOutput, indices, outputSize, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMaxValues(dim []int64, keepdim bool, del bool) 
(retVal *Tensor) { - - retVal, err := ts.MaxValues(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMean(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Mean(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMean1(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Mean1(dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMedian(del bool) (retVal *Tensor) { - - retVal, err := ts.Median(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMin(del bool) (retVal *Tensor) { - - retVal, err := ts.Min(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMin1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Min1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMinOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MinOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMinValues(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MinValues(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustMiopenConvolutionBackwardBias(gradOutput *Tensor) (retVal *Tensor) { - - retVal, err := MiopenConvolutionBackwardBias(gradOutput) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustMiopenConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { - - retVal, err := MiopenConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMiopenConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MiopenConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func 
MustMiopenConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { - - retVal, err := MiopenConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MiopenConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustMiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor) { - - retVal, err := MiopenDepthwiseConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MiopenDepthwiseConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustMkldnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool) (retVal *Tensor) { - - retVal, err := MkldnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, biasDefined) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustMkldnnLinear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor) { - - retVal, err := MkldnnLinear(input, weight, bias) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { - log.Fatal(err) - } - 
- return retVal -} - -func (ts *Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMm(mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Mm(mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MmOut(out, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMseLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MseLoss(target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMseLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MseLossBackwardOut(gradInput, gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMseLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MseLossOut(out, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMul(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Mul(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMul1(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Mul1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMul_(other *Tensor) { - - err := ts.Mul_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustMul1_(other *Scalar) { - - err := ts.Mul1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustMulOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MulOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultiMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MultiMarginLossBackwardOut(gradInput, gradOutput, target, p, margin, weight, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultilabelMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MultilabelMarginLoss(target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor) { - - 
retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultilabelMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MultilabelMarginLossBackwardOut(gradInput, gradOutput, target, reduction, isTarget, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultinomial(numSamples int64, replacement bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Multinomial(numSamples, replacement, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool) (retVal *Tensor) { - - retVal, err := ts.MultinomialOut(out, numSamples, replacement, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMv(vec *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Mv(vec, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMvOut(out *Tensor, vec *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.MvOut(out, vec, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMvlgamma(p int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Mvlgamma(p, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustMvlgamma_(p int64) { - - err := ts.Mvlgamma_(p) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNarrow(dim int64, start int64, length int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Narrow(dim, start, length, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNarrow1(dim int64, start *Tensor, length int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Narrow1(dim, start, length, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NarrowCopy(dim, start, length, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNativeNorm(del bool) (retVal *Tensor) { - - retVal, err := ts.NativeNorm(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNe(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Ne(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNe1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Ne1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNe_(other *Scalar) { - - err := ts.Ne_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNe1_(other *Tensor) { - - err := ts.Ne1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNeOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.NeOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNeOut1(out *Tensor, other *Tensor, del bool) 
(retVal *Tensor) { - - retVal, err := ts.NeOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNeg(del bool) (retVal *Tensor) { - - retVal, err := ts.Neg(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNeg_() { - - err := ts.Neg_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustNegOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NegOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2dBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLossBackwardOut(gradInput, gradOutput, target, 
weight, reduction, ignoreIndex, totalWeight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor) { - - retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNonzero(del bool) (retVal *Tensor) { - - retVal, err := ts.Nonzero(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNonzeroOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.NonzeroOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNorm(del bool) (retVal *Tensor) { - - retVal, err := ts.Norm(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNorm1(p *Scalar, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Norm1(p, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNorm2(p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Norm2(p, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNorm3(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Norm3(p, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustNormExceptDim(v *Tensor, pow int64, dim int64) (retVal *Tensor) { - - retVal, err := NormExceptDim(v, pow, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NormOut(out, p, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormOut1(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.NormOut1(out, p, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNormal_(mean float64, std float64) { - - err := ts.Normal_(mean, std) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustNormalOut(out *Tensor, mean *Tensor, std float64) (retVal *Tensor) { - - retVal, err := NormalOut(out, mean, std) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustNormalOut1(out *Tensor, mean float64, std *Tensor) (retVal *Tensor) { - - retVal, err := NormalOut1(out, mean, std) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustNormalOut2(out *Tensor, mean *Tensor, std *Tensor) (retVal *Tensor) { - - retVal, err := NormalOut2(out, mean, std) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustNormalOut3(out *Tensor, mean float64, std float64, size []int64) (retVal *Tensor) { - - retVal, err := NormalOut3(out, mean, std, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNorm(keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNorm(keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNorm1(dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNorm1(dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNormOut(out 
*Tensor, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNormOut(out, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNuclearNormOut1(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.NuclearNormOut1(out, dim, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustNumpyT(del bool) (retVal *Tensor) { - - retVal, err := ts.NumpyT(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOneHot(numClasses int64, del bool) (retVal *Tensor) { - - retVal, err := ts.OneHot(numClasses, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Ones(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOnesLike(del bool) (retVal *Tensor) { - - retVal, err := ts.OnesLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustOnesOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := OnesOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrgqr(input2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Orgqr(input2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrgqrOut(out *Tensor, input2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.OrgqrOut(out, input2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrmqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Ormqr(input2, input3, left, transpose, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustOrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool) (retVal *Tensor) { - - retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool) (retVal *Tensor) { - - retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPdist(p float64, del bool) (retVal *Tensor) { - - retVal, err := ts.Pdist(p, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPermute(dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Permute(dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPinMemory(del bool) (retVal *Tensor) { - - retVal, err := ts.PinMemory(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPinverse(rcond float64, del bool) (retVal *Tensor) { - - retVal, err := ts.Pinverse(rcond, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPixelShuffle(upscaleFactor int64, del bool) (retVal *Tensor) { - - retVal, err := ts.PixelShuffle(upscaleFactor, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPoisson(del bool) (retVal *Tensor) { - - retVal, err := ts.Poisson(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, 
reduction int64) (retVal *Tensor) { - - retVal, err := PoissonNllLoss(input, target, logInput, full, eps, reduction) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPolygamma(n int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Polygamma(n, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPolygamma_(n int64) { - - err := ts.Polygamma_(n) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustPolygammaOut(out *Tensor, n int64, del bool) (retVal *Tensor) { - - retVal, err := ts.PolygammaOut(out, n, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPow(exponent *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Pow(exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPow1(exponent *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Pow1(exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPow2(selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { - - retVal, err := Pow2(selfScalar, exponent) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPow_(exponent *Scalar) { - - err := ts.Pow_(exponent) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustPow1_(exponent *Tensor) { - - err := ts.Pow1_(exponent) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustPowOut(out *Tensor, exponent *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.PowOut(out, exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPowOut1(out *Tensor, exponent *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.PowOut1(out, exponent, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustPowOut2(out *Tensor, selfScalar *Scalar, exponent *Tensor) (retVal *Tensor) { - - retVal, err := PowOut2(out, selfScalar, exponent) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPrelu(weight *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Prelu(weight, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustProd(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Prod(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustProd1(dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Prod1(dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustProdOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.ProdOut(out, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool) { - - err := ts.Put_(index, source, accumulate) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustQPerChannelScales(del bool) (retVal *Tensor) { - - retVal, err := ts.QPerChannelScales(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQPerChannelZeroPoints(del bool) (retVal *Tensor) { - - retVal, err := ts.QPerChannelZeroPoints(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del 
bool) (retVal *Tensor) { - - retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64) (retVal *Tensor) { - - retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { - - retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor) { - - retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { - - retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustQuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar) (retVal *Tensor) { - - retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Rand(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandLike(del bool) (retVal *Tensor) { - - retVal, err := ts.RandLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := RandOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Randint(high, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Randint1(low, 
high, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandintLike(high int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RandintLike(high, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandintLike1(low int64, high int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RandintLike1(low, high, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandintOut(out *Tensor, high int64, size []int64) (retVal *Tensor) { - - retVal, err := RandintOut(out, high, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandintOut1(out *Tensor, low int64, high int64, size []int64) (retVal *Tensor) { - - retVal, err := RandintOut1(out, low, high, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Randn(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandnLike(del bool) (retVal *Tensor) { - - retVal, err := ts.RandnLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandnOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := RandnOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRandom_() { - - err := ts.Random_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRandom1_(to int64) { - - err := ts.Random1_(to) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRandom2(from int64, to int64) { - - err := ts.Random2(from, to) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Randperm(n, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRandpermOut(out *Tensor, n int64) (retVal *Tensor) { - - retVal, err := RandpermOut(out, n) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRange(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Range(start, end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRange1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Range1(start, end, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRangeOut(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor) { - - retVal, err := RangeOut(out, start, end) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReal(del bool) (retVal *Tensor) { - - retVal, err := ts.Real(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReciprocal(del bool) (retVal *Tensor) { - - retVal, err := ts.Reciprocal(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReciprocal_() { - - err := ts.Reciprocal_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustReciprocalOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ReciprocalOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1d(padding []int64, del 
bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad1dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReflectionPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReflectionPad2dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRelu(del bool) (retVal *Tensor) { - - retVal, err := ts.Relu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRelu_() { - - err := ts.Relu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRemainder(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Remainder(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRemainder1(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Remainder1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRemainder_(other *Scalar) { - - err := ts.Remainder_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRemainder1_(other *Tensor) { - - err := ts.Remainder1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRemainderOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.RemainderOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRemainderOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.RemainderOut1(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRenorm(p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Renorm(p, dim, maxnorm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRenorm_(p *Scalar, dim int64, maxnorm *Scalar) { - - err := ts.Renorm_(p, dim, maxnorm) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts 
*Tensor) MustRenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.RenormOut(out, p, dim, maxnorm, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRepeat(repeats []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Repeat(repeats, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRepeatInterleave(repeats *Tensor) (retVal *Tensor) { - - retVal, err := RepeatInterleave(repeats) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRepeatInterleave1(repeats *Tensor, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RepeatInterleave1(repeats, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRepeatInterleave2(repeats int64, dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.RepeatInterleave2(repeats, dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad1dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad1dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad2dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad2dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3d(padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad3d(padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := 
ts.ReplicationPad3dBackwardOut(gradInput, gradOutput, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReplicationPad3dOut(out *Tensor, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ReplicationPad3dOut(out, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRequiresGrad_(requiresGrad bool) { - - err := ts.RequiresGrad_(requiresGrad) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustReshape(shape []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Reshape(shape, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustReshapeAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ReshapeAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustResize_(size []int64) { - - err := ts.Resize_(size) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustResizeAs_(theTemplate *Tensor) { - - err := ts.ResizeAs_(theTemplate) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRfft(signalNdim int64, normalized bool, onesided bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Rfft(signalNdim, normalized, onesided, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { - - retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor) (retVal *Tensor) { - - retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRoll(shifts []int64, dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Roll(shifts, dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRot90(k int64, dims []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Rot90(k, dims, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRound(del bool) (retVal *Tensor) { - - retVal, err := ts.Round(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRound_() { - - err := ts.Round_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRoundOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.RoundOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRrelu(training bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Rrelu(training, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRrelu_(training bool) { - - err := ts.Rrelu_(training) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRreluWithNoise(noise *Tensor, training bool, del bool) (retVal *Tensor) { - - retVal, err := ts.RreluWithNoise(noise, training, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRreluWithNoise_(noise *Tensor, training bool) { - - err := ts.RreluWithNoise_(noise, training) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool) 
(retVal *Tensor) { - - retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool) (retVal *Tensor) { - - retVal, err := ts.RreluWithNoiseOut(out, noise, training, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRsqrt(del bool) (retVal *Tensor) { - - retVal, err := ts.Rsqrt(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRsqrt_() { - - err := ts.Rsqrt_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustRsqrtOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.RsqrtOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRsub(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Rsub(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustRsub1(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Rsub1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := ScalarTensor(s, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatter(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Scatter(dim, index, src, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatter1(dim int64, index *Tensor, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Scatter1(dim, index, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatter_(dim int64, index *Tensor, src *Tensor) { - - err := ts.Scatter_(dim, index, src) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustScatter1_(dim int64, index *Tensor, value *Scalar) { - - err := ts.Scatter1_(dim, index, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustScatterAdd(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ScatterAdd(dim, index, src, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustScatterAdd_(dim int64, index *Tensor, src *Tensor) { - - err := ts.ScatterAdd_(dim, index, src) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSelect(dim int64, index int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Select(dim, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSelu(del bool) (retVal *Tensor) { - - retVal, err := ts.Selu(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSelu_() { - - err := ts.Selu_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSet_() { - - err := ts.Set_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSet1_(source *Tensor) { - - err := ts.Set1_(source) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSetRequiresGrad(r bool, del bool) (retVal *Tensor) { - - retVal, err := ts.SetRequiresGrad(r, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSigmoid(del bool) (retVal *Tensor) { - 
- retVal, err := ts.Sigmoid(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSigmoid_() { - - err := ts.Sigmoid_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustSigmoidBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := SigmoidBackward(gradOutput, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := SigmoidBackwardOut(gradInput, gradOutput, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSigmoidOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SigmoidOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSign(del bool) (retVal *Tensor) { - - retVal, err := ts.Sign(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSign_() { - - err := ts.Sign_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSignOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SignOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSin(del bool) (retVal *Tensor) { - - retVal, err := ts.Sin(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSin_() { - - err := ts.Sin_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSinOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SinOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSinh(del bool) (retVal *Tensor) { - - retVal, err := ts.Sinh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSinh_() { - - err := ts.Sinh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSinhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SinhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlice(dim int64, start int64, end int64, step int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Slice(dim, start, end, step, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del) - 
if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmm(mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Smm(mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmoothL1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1Loss(target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmoothL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1LossBackwardOut(gradInput, gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SmoothL1LossOut(out, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLoss(target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLossBackwardOut(gradInput, gradOutput, target, reduction, del) - if err != 
nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftMarginLossOut(out, target, reduction, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Softmax(dim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplus(del bool) (retVal *Tensor) { - - retVal, err := ts.Softplus(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, output, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplusBackwardOut(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftplusBackwardOut(gradInput, gradOutput, beta, threshold, output, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftplusOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftplusOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrink(del bool) (retVal *Tensor) { - - retVal, err := ts.Softshrink(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrinkBackwardOut(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftshrinkBackwardOut(gradInput, gradOutput, lambd, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSoftshrinkOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SoftshrinkOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCooTensor(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSparseCooTensor1(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCooTensor1(indices, values, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustSparseCooTensor2(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := SparseCooTensor2(indices, values, size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSparseMask(mask *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SparseMask(mask, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64) { - - err := ts.SparseResize_(size, sparseDim, denseDim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSparseResizeAndClear_(size []int64, sparseDim 
int64, denseDim int64) { - - err := ts.SparseResizeAndClear_(size, sparseDim, denseDim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSqrt(del bool) (retVal *Tensor) { - - retVal, err := ts.Sqrt(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqrt_() { - - err := ts.Sqrt_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSqrtOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SqrtOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSquare(del bool) (retVal *Tensor) { - - retVal, err := ts.Square(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSquare_() { - - err := ts.Square_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSqueeze(del bool) (retVal *Tensor) { - - retVal, err := ts.Squeeze(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqueeze1(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Squeeze1(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSqueeze_() { - - err := ts.Squeeze_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSqueeze1_(dim int64) { - - err := ts.Squeeze1_(dim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSspaddmm(mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Sspaddmm(mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SspaddmmOut(out, mat1, mat2, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustStack(tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := Stack(tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustStackOut(out *Tensor, tensors []Tensor, dim int64) (retVal *Tensor) { - - retVal, err := StackOut(out, tensors, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStd(unbiased bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Std(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStd1(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Std1(dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustStft(nFft int64, hopLength int64, winLength int64, window *Tensor, normalized bool, onesided bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSub(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Sub(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSub1(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Sub1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSub_(other *Tensor) { - - err := ts.Sub_(other) - if 
err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSub1_(other *Scalar) { - - err := ts.Sub1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustSubOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.SubOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSum(dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Sum(dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSum1(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Sum1(dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.SumOut(out, dim, keepdim, dtype, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustSumToSize(size []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.SumToSize(size, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustT(del bool) (retVal *Tensor) { - - retVal, err := ts.T(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustT_() { - - err := ts.T_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTake(index *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Take(index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTakeOut(out *Tensor, index *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TakeOut(out, index, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTan(del bool) (retVal *Tensor) { - - retVal, err := ts.Tan(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTan_() { - - err := ts.Tan_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTanOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TanOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTanh(del bool) (retVal *Tensor) { - - retVal, err := ts.Tanh(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTanh_() { - - err := ts.Tanh_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTanhBackward(gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := TanhBackward(gradOutput, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor) (retVal *Tensor) { - - retVal, err := TanhBackwardOut(gradInput, gradOutput, output) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTanhOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TanhOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustThreshold(threshold *Scalar, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.Threshold(threshold, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func 
(ts *Tensor) MustThreshold_(threshold *Scalar, value *Scalar) { - - err := ts.Threshold_(threshold, value) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ThresholdBackward(gradOutput, threshold, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.ThresholdOut(out, threshold, value, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTo(device gotch.Device, del bool) (retVal *Tensor) { - - retVal, err := ts.To(device, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTo1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.To1(optionsKind, optionsDevice, nonBlocking, copy, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTo2(dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.To2(dtype, nonBlocking, copy, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTo3(other *Tensor, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.To3(other, nonBlocking, copy, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTo4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool) (retVal *Tensor) { - - retVal, err := ts.To4(device, dtype, nonBlocking, copy, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToDense(del bool) (retVal *Tensor) { - - retVal, err := ts.ToDense(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustToDenseBackward(grad *Tensor, input *Tensor) (retVal *Tensor) { - - retVal, err := ToDenseBackward(grad, input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToMkldnn(del bool) (retVal *Tensor) { - - retVal, err := ts.ToMkldnn(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustToMkldnnBackward(grad *Tensor, input *Tensor) (retVal *Tensor) { - - retVal, err := ToMkldnnBackward(grad, input) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToSparse(del bool) (retVal *Tensor) { - - retVal, err := ts.ToSparse(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustToSparse1(sparseDim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.ToSparse1(sparseDim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTotype(scalarType gotch.DType, del bool) (retVal *Tensor) { - - retVal, err := ts.Totype(scalarType, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrace(del bool) (retVal *Tensor) { - - retVal, err := ts.Trace(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTranspose(dim0 int64, dim1 int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Transpose(dim0, dim1, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTranspose_(dim0 int64, dim1 int64) { - - err := ts.Transpose_(dim0, dim1) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTrapz(y *Tensor, x *Tensor, dim 
int64) (retVal *Tensor) { - - retVal, err := Trapz(y, x, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTrapz1(y *Tensor, dx float64, dim int64) (retVal *Tensor) { - - retVal, err := Trapz1(y, dx, dim) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTril(diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Tril(diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTril_(diagonal int64) { - - err := ts.Tril_(diagonal) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrilOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.TrilOut(out, diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustTripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64) (retVal *Tensor) { - - retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTriu(diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Triu(diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTriu_(diagonal int64) { - - err := ts.Triu_(diagonal) - if err != nil { - log.Fatal(err) - } - - return -} - -func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTriuOut(out *Tensor, diagonal int64, del bool) (retVal *Tensor) { - - retVal, err := ts.TriuOut(out, diagonal, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrueDivide(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TrueDivide(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrueDivide1(other *Scalar, del bool) (retVal *Tensor) { - - retVal, err := ts.TrueDivide1(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrueDivide_(other *Tensor) { - - err := ts.TrueDivide_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTrueDivide1_(other *Scalar) { - - err := ts.TrueDivide1_(other) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTrueDivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TrueDivideOut(out, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrunc(del bool) (retVal *Tensor) { - - retVal, err := ts.Trunc(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTrunc_() { - - err := ts.Trunc_() - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustTruncOut(out *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.TruncOut(out, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustTypeAs(other *Tensor, del bool) (retVal 
*Tensor) { - - retVal, err := ts.TypeAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Unfold(dimension, size, step, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUniform_(from float64, to float64) { - - err := ts.Uniform_(from, to) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustUnsqueeze(dim int64, del bool) (retVal *Tensor) { - - retVal, err := ts.Unsqueeze(dim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUnsqueeze_(dim int64) { - - err := ts.Unsqueeze_(dim) - if err != nil { - log.Fatal(err) - } - - return -} - -func (ts *Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBicubic2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleBicubic2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleBilinear2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleBilinear2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, 
del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor) { - - retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleLinear1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor) { - - retVal, err := UpsampleLinear1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest1d(outputSize []int64, scales float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest1d(outputSize, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scales) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest1dOut(out *Tensor, outputSize []int64, scales float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, 
err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleNearest3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleNearest3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustUpsampleTrilinear3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor) { - - retVal, err := UpsampleTrilinear3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustUpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor) { - - retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustValues(del bool) (retVal *Tensor) { - - retVal, err := ts.Values(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVar(unbiased bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Var(unbiased, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVar1(dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.Var1(dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustVarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool) (retVal *Tensor) { - - retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustView(size []int64, del bool) (retVal *Tensor) { - - retVal, err := ts.View(size, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - 
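Every wrapper in this generated file follows the same pattern: a `Must*` method forwards to its error-returning counterpart and aborts via `log.Fatal` on failure, and the trailing `del` flag asks the wrapper to drop the receiver tensor once the result has been created. A minimal usage sketch of this API (the `tensor` package import path and the `gotch.CPU` device value are assumptions based on the v0.3.0 layout, not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	// 2x3 float tensor of zeros on CPU; MustZeros aborts via log.Fatal on error.
	ts := tensor.MustZeros([]int64{2, 3}, gotch.Float, gotch.CPU)

	// del=true drops the receiver after the reshaped tensor is created,
	// so the intermediate C memory is released without an explicit MustDrop.
	flat := ts.MustView([]int64{6}, true)

	fmt.Println(flat.Float64Values()) // [0 0 0 0 0 0]

	flat.MustDrop()
}
```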
-func (ts *Tensor) MustViewAs(other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.ViewAs(other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustWhere1(condition *Tensor, other *Tensor, del bool) (retVal *Tensor) { - - retVal, err := ts.Where1(condition, other, del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustZero_() { - - err := ts.Zero_() - if err != nil { - log.Fatal(err) - } - - return -} - -func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor) { - - retVal, err := Zeros(size, optionsKind, optionsDevice) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func (ts *Tensor) MustZerosLike(del bool) (retVal *Tensor) { - - retVal, err := ts.ZerosLike(del) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -func MustZerosOut(out *Tensor, size []int64) (retVal *Tensor) { - - retVal, err := ZerosOut(out, size) - if err != nil { - log.Fatal(err) - } - - return retVal -} - -// End of implementing Tensor ================================= +func(ts *Tensor) Must__And_(other *Scalar)() { + + err := ts.__And_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__And1(other *Tensor)() { + + err := ts.__And1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Iand_(other *Scalar)() { + + err := ts.__Iand_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Iand1(other *Tensor)() { + + err := ts.__Iand1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ilshift_(other *Scalar)() { + + err := ts.__Ilshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ilshift1(other *Tensor)() { + + err := ts.__Ilshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ior_(other *Scalar)() { + + err := ts.__Ior_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ior1(other *Tensor)() { + + err := ts.__Ior1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Irshift_(other *Scalar)() { + + err := ts.__Irshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Irshift1(other *Tensor)() { + + err := ts.__Irshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ixor_(other *Scalar)() { + + err := ts.__Ixor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Ixor1(other *Tensor)() { + + err := ts.__Ixor1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Lshift_(other *Scalar)() { + + err := ts.__Lshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Lshift1(other *Tensor)() { + + err := ts.__Lshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Or_(other *Scalar)() { + + err := ts.__Or_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Or1(other *Tensor)() { + + err := ts.__Or1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Rshift_(other *Scalar)() { + + err := ts.__Rshift_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Rshift1(other *Tensor)() { + + err := ts.__Rshift1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Xor_(other *Scalar)() { + + err := ts.__Xor_(other) + if err != nil { 
log.Fatal(err) } + + return +} + +func(ts *Tensor) Must__Xor1(other *Tensor)() { + + err := ts.__Xor1(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_AdaptiveAvgPool2d(outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._AdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._AdaptiveAvgPool2dBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddBatchDim(batchDim int64, level int64, del bool)(retVal *Tensor) { + + retVal, err := ts._AddBatchDim(batchDim, level, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddRelu(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._AddRelu(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddRelu_(other *Tensor)() { + + err := ts._AddRelu_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_AddReluOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._AddReluOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_AddmvImpl_(self2 *Tensor, mat *Tensor, vec *Tensor)() { + + err := ts._AddmvImpl_(self2, mat, vec) + if err != nil { log.Fatal(err) } + + return +} + +func Must_AmpUpdateScale(growthTracker *Tensor, currentScale *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64)(retVal *Tensor) { + + retVal, err := _AmpUpdateScale(growthTracker, currentScale, foundInf, scaleGrowthFactor, scaleBackoffFactor, growthInterval) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor)() { + + err := ts._BaddbmmMkl_(batch1, batch2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_Bmm(mat2 *Tensor, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts._Bmm(mat2, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_BmmOut(out *Tensor, mat2 *Tensor, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts._BmmOut(out, mat2, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastByte(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastByte(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastChar(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastChar(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastDouble(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastDouble(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastFloat(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastFloat(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastHalf(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastHalf(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastInt(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastInt(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
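The regenerated wrappers come in just two shapes, both visible throughout this hunk: value-returning methods take a trailing `del` flag and return `*Tensor`, while in-place methods (trailing `_`, including the `Must__*` bitwise ops above) return nothing; the main formatting change from the removed code is the single-line `if err != nil { log.Fatal(err) }`. A standalone, hand-written sketch of the two templates, using a hypothetical `Frobnicate` op purely for illustration:

```go
// Illustration of the two wrapper shapes the generator emits. Tensor,
// Frobnicate and Frobnicate_ are hypothetical stand-ins, not the gotch
// types; only the Must* wrapping pattern mirrors the diff above.
package main

import "log"

type Tensor struct{ data []float64 }

// Hypothetical error-returning ops standing in for the generated bindings.
func (ts *Tensor) Frobnicate(dim int64, del bool) (*Tensor, error) { return ts, nil }
func (ts *Tensor) Frobnicate_(dim int64) error                     { return nil }

// Value-returning shape: forward to the error-returning API, abort on error.
func (ts *Tensor) MustFrobnicate(dim int64, del bool) (retVal *Tensor) {
	retVal, err := ts.Frobnicate(dim, del)
	if err != nil {
		log.Fatal(err)
	}
	return retVal
}

// In-place shape: the receiver is mutated and nothing is returned.
func (ts *Tensor) MustFrobnicate_(dim int64) {
	err := ts.Frobnicate_(dim)
	if err != nil {
		log.Fatal(err)
	}
}

func main() {
	t := &Tensor{}
	t.MustFrobnicate_(0)
	_ = t.MustFrobnicate(0, true)
}
```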
+func(ts *Tensor) Must_CastLong(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastLong(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CastShort(nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CastShort(nonBlocking, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Cat(tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _Cat(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _CatOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CdistBackward(grad *Tensor, x1 *Tensor, x2 *Tensor, p float64, cdist *Tensor)(retVal *Tensor) { + + retVal, err := _CdistBackward(grad, x1, x2, p, cdist) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CholeskyHelper(upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CholeskyHelper(upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CholeskySolveHelper(a *Tensor, upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CholeskySolveHelper(a, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Coalesced_(coalesced bool)() { + + err := ts._Coalesced_(coalesced) + if err != nil { log.Fatal(err) } + + return +} + +func Must_ComputeLinearCombination(input *Tensor, coefficients *Tensor)(retVal *Tensor) { + + retVal, err := _ComputeLinearCombination(input, coefficients) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor)(retVal *Tensor) { + + retVal, err := _ComputeLinearCombinationOut(out, input, coefficients) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Conj(del bool)(retVal *Tensor) { + + retVal, err := ts._Conj(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool)(retVal *Tensor) { + + retVal, err := _Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Convolution1(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool)(retVal *Tensor) { + + retVal, err := _Convolution1(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups, benchmark, deterministic, cudnnEnabled, allowTf32) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64)(retVal *Tensor) { + + retVal, err := _ConvolutionNogroup(input, weight, bias, stride, padding, dilation, transposed, outputPadding) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CopyFrom(dst *Tensor, nonBlocking bool, del bool)(retVal *Tensor) { + + retVal, err := ts._CopyFrom(dst, nonBlocking, del) + if err != nil { 
log.Fatal(err) } + + return retVal +} + +func Must_CtcLossBackward(grad *Tensor, logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, negLogLikelihood *Tensor, logAlpha *Tensor, blank int64, zeroInfinity bool)(retVal *Tensor) { + + retVal, err := _CtcLossBackward(grad, logProbs, targets, inputLengths, targetLengths, negLogLikelihood, logAlpha, blank, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CudnnInitDropoutState(dropout float64, train bool, dropoutSeed int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _CudnnInitDropoutState(dropout, train, dropoutSeed, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_CudnnRnnFlattenWeight(weightArr []Tensor, weightStride0 int64, inputSize int64, mode int64, hiddenSize int64, numLayers int64, batchFirst bool, bidirectional bool)(retVal *Tensor) { + + retVal, err := _CudnnRnnFlattenWeight(weightArr, weightStride0, inputSize, mode, hiddenSize, numLayers, batchFirst, bidirectional) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Cumprod(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._Cumprod(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CumprodOut(out *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._CumprodOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Cumsum(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._Cumsum(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_CumsumOut(out *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._CumsumOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_DimArange(like *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _DimArange(like, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_DirichletGrad(x *Tensor, alpha *Tensor, total *Tensor)(retVal *Tensor) { + + retVal, err := _DirichletGrad(x, alpha, total) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, sparse bool, perSampleWeights *Tensor)(retVal *Tensor) { + + retVal, err := _EmbeddingBagBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, sparse, perSampleWeights) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagDenseBackward(grad *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, maximumIndices *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor)(retVal *Tensor) { + + retVal, err := _EmbeddingBagDenseBackward(grad, indices, offsets, offset2bag, bagSize, maximumIndices, numWeights, scaleGradByFreq, mode, perSampleWeights) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagPerSampleWeightsBackward(grad *Tensor, weight *Tensor, indices *Tensor, offsets *Tensor, offset2bag *Tensor, mode int64)(retVal *Tensor) { + + retVal, err := _EmbeddingBagPerSampleWeightsBackward(grad, weight, indices, offsets, offset2bag, mode) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmbeddingBagSparseBackward(grad *Tensor, indices *Tensor, 
offsets *Tensor, offset2bag *Tensor, bagSize *Tensor, numWeights int64, scaleGradByFreq bool, mode int64, perSampleWeights *Tensor)(retVal *Tensor) { + + retVal, err := _EmbeddingBagSparseBackward(grad, indices, offsets, offset2bag, bagSize, numWeights, scaleGradByFreq, mode, perSampleWeights) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmptyAffineQuantized(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, scale float64, zeroPoint int64)(retVal *Tensor) { + + retVal, err := _EmptyAffineQuantized(size, optionsKind, optionsDevice, scale, zeroPoint) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *Tensor, axis int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _EmptyPerChannelAffineQuantized(size, scales, zeroPoints, axis, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_EuclideanDist(x1 *Tensor, x2 *Tensor)(retVal *Tensor) { + + retVal, err := _EuclideanDist(x1, x2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts._FakeQuantizeLearnablePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts._FakeQuantizeLearnablePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._FftWithSize(signalNdim, complexInput, complexOutput, inverse, checkedSignalSizes, normalized, onesided, outputSizes, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_FftWithSize1(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalization int64, onesided bool, outputSizes []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._FftWithSize1(signalNdim, complexInput, complexOutput, inverse, checkedSignalSizes, normalization, onesided, outputSizes, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._GatherSparseBackward(dim, index, grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := _GridSampler2dCpuFallback(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_IndexCopy_(dim int64, index *Tensor, source *Tensor)() { + + err := ts._IndexCopy_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_IndexPutImpl_(indices []Tensor, values *Tensor, accumulate bool, unsafety bool)() { + + err := ts._IndexPutImpl_(indices, values, accumulate, 
unsafety) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_Indices(del bool)(retVal *Tensor) { + + retVal, err := ts._Indices(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_InverseHelper(del bool)(retVal *Tensor) { + + retVal, err := ts._InverseHelper(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LogSoftmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._LogSoftmax(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._LogSoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Logcumsumexp(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._Logcumsumexp(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._LogcumsumexpOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_LuSolveHelper(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._LuSolveHelper(lUData, lUPivots, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MakePerChannelQuantizedTensor(scale *Tensor, zeroPoint *Tensor, axis int64, del bool)(retVal *Tensor) { + + retVal, err := ts._MakePerChannelQuantizedTensor(scale, zeroPoint, axis, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MakePerTensorQuantizedTensor(scale float64, zeroPoint int64, del bool)(retVal *Tensor) { + + retVal, err := ts._MakePerTensorQuantizedTensor(scale, zeroPoint, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MaskedScale(mask *Tensor, scale float64, del bool)(retVal *Tensor) { + + retVal, err := ts._MaskedScale(mask, scale, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MkldnnReshape(shape []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._MkldnnReshape(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MkldnnTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) { + + retVal, err := ts._MkldnnTranspose(dim0, dim1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_MkldnnTranspose_(dim0 int64, dim1 int64)() { + + err := ts._MkldnnTranspose_(dim0, dim1) + if err != nil { log.Fatal(err) } + + return +} + +func Must_MultinomialAliasDraw(j *Tensor, q *Tensor, numSamples int64)(retVal *Tensor) { + + retVal, err := _MultinomialAliasDraw(j, q, numSamples) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackSpatialConvolution(input *Tensor, weight *Tensor, bias *Tensor, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := _NnpackSpatialConvolution(input, weight, bias, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackSpatialConvolutionBackwardInput(input *Tensor, gradOutput *Tensor, weight *Tensor, padding []int64)(retVal *Tensor) { + + retVal, err := _NnpackSpatialConvolutionBackwardInput(input, gradOutput, weight, padding) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_NnpackSpatialConvolutionBackwardWeight(input *Tensor, weightsize 
[]int64, gradOutput *Tensor, padding []int64)(retVal *Tensor) { + + retVal, err := _NnpackSpatialConvolutionBackwardWeight(input, weightsize, gradOutput, padding) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_PackPaddedSequenceBackward(grad *Tensor, inputSize []int64, batchSizes *Tensor, batchFirst bool)(retVal *Tensor) { + + retVal, err := _PackPaddedSequenceBackward(grad, inputSize, batchSizes, batchFirst) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_PdistBackward(grad *Tensor, p float64, pdist *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._PdistBackward(grad, p, pdist, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._RemoveBatchDim(level, batchSize, outDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ReshapeFromTensor(shape *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._ReshapeFromTensor(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SWhere(condition *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._SWhere(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SampleDirichlet(del bool)(retVal *Tensor) { + + retVal, err := ts._SampleDirichlet(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SaturateWeightToFp16(weight *Tensor)(retVal *Tensor) { + + retVal, err := _SaturateWeightToFp16(weight) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_ShapeAsTensor(del bool)(retVal *Tensor) { + + retVal, err := ts._ShapeAsTensor(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SobolEngineFf_(n int64, sobolstate *Tensor, dimension int64, numGenerated int64)() { + + err := ts._SobolEngineFf_(n, sobolstate, dimension, numGenerated) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_SobolEngineInitializeState_(dimension int64)() { + + err := ts._SobolEngineInitializeState_(dimension) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_SobolEngineScramble_(ltm *Tensor, dimension int64)() { + + err := ts._SobolEngineScramble_(ltm, dimension) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) Must_Softmax(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._Softmax(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseAddmm(sparse *Tensor, dense *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseAddmm(sparse, dense, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorUnsafe(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _SparseCooTensorUnsafe(indices, values, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorWithDims(sparseDim int64, denseDim int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { 
+ + retVal, err := _SparseCooTensorWithDims(sparseDim, denseDim, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size []int64, indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := _SparseCooTensorWithDimsAndTensors(sparseDim, denseDim, size, indices, values, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseLogSoftmax(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseLogSoftmax1(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseLogSoftmax1(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseLogSoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_SparseMm(sparse *Tensor, dense *Tensor)(retVal *Tensor) { + + retVal, err := _SparseMm(sparse, dense) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSoftmax(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSoftmax1(dim int64, halfToFloat bool, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSoftmax1(dim, halfToFloat, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSoftmaxBackwardData(gradOutput, output, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSum(del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSum(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSum1(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSum1(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSum2(dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSum2(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSum3(dim []int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSum3(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_SparseSumBackward(grad *Tensor, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._SparseSumBackward(grad, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_StandardGamma(del bool)(retVal *Tensor) { + + retVal, err := ts._StandardGamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_StandardGammaGrad(output *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._StandardGammaGrad(output, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Std(unbiased bool, del bool)(retVal *Tensor) { + + retVal, err := ts._Std(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
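Every generated binding in this file comes in an error-returning form (defined elsewhere in the package) and a `Must*` twin like the ones above: the wrapper calls the fallible version, aborts via `log.Fatal` on any error, and returns only the value, while the trailing `del bool` argument asks the callee to drop the receiver tensor once the result has been produced. A minimal self-contained sketch of that pattern, using a hypothetical `Tensor` type rather than the real binding:

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// Stand-in for the generated bindings: every fallible method has a Must*
// twin that calls log.Fatal on error and returns only the value.
type Tensor struct{ vals []float64 }

// Abs is the error-returning form; del mimics the generated `del bool`
// parameter that consumes (drops) the receiver after the call.
func (ts *Tensor) Abs(del bool) (*Tensor, error) {
	if ts == nil {
		return nil, errors.New("nil tensor")
	}
	out := &Tensor{vals: make([]float64, len(ts.vals))}
	for i, v := range ts.vals {
		if v < 0 {
			v = -v
		}
		out.vals[i] = v
	}
	if del {
		ts.vals = nil // stand-in for releasing the underlying C tensor
	}
	return out, nil
}

// MustAbs has the same shape as the generated wrappers in this diff.
func (ts *Tensor) MustAbs(del bool) (retVal *Tensor) {
	retVal, err := ts.Abs(del)
	if err != nil {
		log.Fatal(err)
	}
	return retVal
}

func main() {
	ts := &Tensor{vals: []float64{-1, 2, -3}}
	fmt.Println(ts.MustAbs(true).vals) // [1 2 3]
}
```

In the real package the computation and the memory release happen in C via Libtorch; the sketch only mirrors the control flow of the wrappers.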
+func Must_TestOptionalFilledIntlist(values *Tensor, addends []int64)(retVal *Tensor) { + + retVal, err := _TestOptionalFilledIntlist(values, addends) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_TestOptionalIntlist(values *Tensor, addends []int64)(retVal *Tensor) { + + retVal, err := _TestOptionalIntlist(values, addends) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_TestSerializationSubcmul(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts._TestSerializationSubcmul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64)(retVal *Tensor) { + + retVal, err := _Trilinear(i1, i2, i3, expand1, expand2, expand3, sumdim, unrollDim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_UnsafeView(size []int64, del bool)(retVal *Tensor) { + + retVal, err := ts._UnsafeView(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Values(del bool)(retVal *Tensor) { + + retVal, err := ts._Values(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) Must_Var(unbiased bool, del bool)(retVal *Tensor) { + + retVal, err := ts._Var(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func Must_WeightNorm(v *Tensor, g *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := _WeightNorm(v, g, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAbs(del bool)(retVal *Tensor) { + + retVal, err := ts.Abs(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAbs_()() { + + err := ts.Abs_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAbsOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AbsOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAbsolute(del bool)(retVal *Tensor) { + + retVal, err := ts.Absolute(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAbsolute_()() { + + err := ts.Absolute_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAbsoluteOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AbsoluteOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAcos(del bool)(retVal *Tensor) { + + retVal, err := ts.Acos(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAcos_()() { + + err := ts.Acos_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAcosOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AcosOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAcosh(del bool)(retVal *Tensor) { + + retVal, err := ts.Acosh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAcosh_()() { + + err := ts.Acosh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAcoshOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AcoshOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveAvgPool1d(outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool1d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveAvgPool2d(outputSize 
[]int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveAvgPool2dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool2dOut(out, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveAvgPool3d(outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveAvgPool3dBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveAvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dBackwardOut(gradInput, gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveAvgPool3dOut(out *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveAvgPool3dOut(out, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveMaxPool2dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool2dBackward(gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool2dBackwardOut(gradInput, gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveMaxPool3dBackward(gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool3dBackward(gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdaptiveMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AdaptiveMaxPool3dBackwardOut(gradInput, gradOutput, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdd(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Add(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdd1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Add1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAdd_(other *Tensor)() { + + err := ts.Add_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAdd1_(other *Scalar)() { + + err := ts.Add1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAddOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AddOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Addbmm(batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddbmm_(batch1 *Tensor, batch2 *Tensor)() { + + err := ts.Addbmm_(batch1, batch2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal 
*Tensor) { + + retVal, err := ts.AddbmmOut(out, batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddcdiv(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Addcdiv(tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddcdiv_(tensor1 *Tensor, tensor2 *Tensor)() { + + err := ts.Addcdiv_(tensor1, tensor2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAddcdivOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AddcdivOut(out, tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddcmul(tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Addcmul(tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddcmul_(tensor1 *Tensor, tensor2 *Tensor)() { + + err := ts.Addcmul_(tensor1, tensor2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAddcmulOut(out *Tensor, tensor1 *Tensor, tensor2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AddcmulOut(out, tensor1, tensor2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Addmm(mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddmm_(mat1 *Tensor, mat2 *Tensor)() { + + err := ts.Addmm_(mat1, mat2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AddmmOut(out, mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddmv(mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Addmv(mat, vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddmv_(mat *Tensor, vec *Tensor)() { + + err := ts.Addmv_(mat, vec) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAddmvOut(out *Tensor, mat *Tensor, vec *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AddmvOut(out, mat, vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddr(vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Addr(vec1, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAddr_(vec1 *Tensor, vec2 *Tensor)() { + + err := ts.Addr_(vec1, vec2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AddrOut(out, vec1, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustAffineGridGenerator(theta *Tensor, size []int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := AffineGridGenerator(theta, size, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustAffineGridGeneratorBackward(grad *Tensor, size []int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := AffineGridGeneratorBackward(grad, size, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAlias(del bool)(retVal *Tensor) { + + retVal, err := ts.Alias(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustAlignAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AlignAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAll(del bool)(retVal *Tensor) { + + retVal, err := ts.All(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAll1(dim int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.All1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAllOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.AllOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) { + + retVal, err := AlphaDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAlphaDropout_(p float64, train bool)() { + + err := ts.AlphaDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Amax(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAmaxOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.AmaxOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Amin(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAminOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.AminOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAngle(del bool)(retVal *Tensor) { + + retVal, err := ts.Angle(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAngleOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AngleOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAny(del bool)(retVal *Tensor) { + + retVal, err := ts.Any(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAny1(dim int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Any1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAnyOut(out *Tensor, dim int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.AnyOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArange(end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Arange(end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArange1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Arange1(start, end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArange2(start *Scalar, end *Scalar, step *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Arange2(start, end, step, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustArangeOut(out *Tensor, end *Scalar)(retVal *Tensor) { + + retVal, err := ArangeOut(out, end) + if err != nil { log.Fatal(err) } + + return retVal +} + 
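Factory functions such as `MustArange`, `MustArange1`, and `MustArange2` above take the target kind and device explicitly (`optionsKind gotch.DType`, `optionsDevice gotch.Device`); the numeric suffix distinguishes overloads (end only / start and end / start, end, and step), and the `*Out` variants write into a preallocated tensor instead of allocating a new one. A hedged usage sketch, assuming gotch v0.3.0 is installed and the generated file lives in `github.com/sugarme/gotch/tensor`; the `FloatScalar` helper and the `gotch.CPU` device value are assumptions about the surrounding library, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	// MustArange2(start, end, step, kind, device) mirrors torch.arange(start, end, step).
	start := tensor.FloatScalar(0.0)
	end := tensor.FloatScalar(5.0)
	step := tensor.FloatScalar(1.0)

	ts := tensor.MustArange2(start, end, step, gotch.Float, gotch.CPU)
	defer ts.MustDrop()

	fmt.Println(ts.Float64Values()) // [0 1 2 3 4]
}
```

Dropping the result with `MustDrop` matters because the underlying storage is owned by Libtorch, not by Go's garbage collector.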
+func MustArangeOut1(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) { + + retVal, err := ArangeOut1(out, start, end) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArccos(del bool)(retVal *Tensor) { + + retVal, err := ts.Arccos(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArccos_()() { + + err := ts.Arccos_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustArccosOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ArccosOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArccosh(del bool)(retVal *Tensor) { + + retVal, err := ts.Arccosh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArccosh_()() { + + err := ts.Arccosh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustArccoshOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ArccoshOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArcsin(del bool)(retVal *Tensor) { + + retVal, err := ts.Arcsin(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArcsin_()() { + + err := ts.Arcsin_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustArcsinOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ArcsinOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArcsinh(del bool)(retVal *Tensor) { + + retVal, err := ts.Arcsinh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArcsinh_()() { + + err := ts.Arcsinh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustArcsinhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ArcsinhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArctan(del bool)(retVal *Tensor) { + + retVal, err := ts.Arctan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArctan_()() { + + err := ts.Arctan_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustArctanOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ArctanOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArctanh(del bool)(retVal *Tensor) { + + retVal, err := ts.Arctanh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArctanh_()() { + + err := ts.Arctanh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustArctanhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ArctanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArgmax(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Argmax(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArgmin(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Argmin(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustArgsort(dim int64, descending bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Argsort(dim, descending, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAsStrided(size []int64, stride []int64, storageOffset []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AsStrided(size, stride, storageOffset, 
del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAsStrided_(size []int64, stride []int64, storageOffset []int64)() { + + err := ts.AsStrided_(size, stride, storageOffset) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAsin(del bool)(retVal *Tensor) { + + retVal, err := ts.Asin(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAsin_()() { + + err := ts.Asin_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAsinOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AsinOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAsinh(del bool)(retVal *Tensor) { + + retVal, err := ts.Asinh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAsinh_()() { + + err := ts.Asinh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAsinhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AsinhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtan(del bool)(retVal *Tensor) { + + retVal, err := ts.Atan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtan2(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Atan2(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtan2_(other *Tensor)() { + + err := ts.Atan2_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAtan2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Atan2Out(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtan_()() { + + err := ts.Atan_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAtanOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AtanOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtanh(del bool)(retVal *Tensor) { + + retVal, err := ts.Atanh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtanh_()() { + + err := ts.Atanh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustAtanhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.AtanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtleast1d(del bool)(retVal *Tensor) { + + retVal, err := ts.Atleast1d(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtleast2d(del bool)(retVal *Tensor) { + + retVal, err := ts.Atleast2d(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAtleast3d(del bool)(retVal *Tensor) { + + retVal, err := ts.Atleast3d(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool1d(kernelSize, stride, padding, ceilMode, countIncludePad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool2d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
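Methods whose names end in an underscore (`MustAsStrided_`, `MustAtan_`, `MustAtanh_`, and the rest above) are the in-place forms: they mutate the receiver and return nothing, so there is no `del` flag and no new tensor to drop. A hedged sketch chaining an out-of-place factory call with two in-place ones, under the same assumptions as the previous example:

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

func main() {
	// arange(4) -> [0 1 2 3] as Float on CPU.
	ts := tensor.MustArange(tensor.FloatScalar(4.0), gotch.Float, gotch.CPU)
	defer ts.MustDrop()

	ts.MustCeil_()                            // in-place; values are already integral, so unchanged
	ts.MustClampMin_(tensor.FloatScalar(1.0)) // in-place; clamps everything below 1.0 up to 1.0

	fmt.Println(ts.Float64Values()) // [1 1 2 3]
}
```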
+func(ts *Tensor) MustAvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool2dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool2dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool2dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool3d(kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool3dBackward(gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool3dBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustAvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.AvgPool3dOut(out, kernelSize, stride, padding, ceilMode, countIncludePad, divisorOverride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBaddbmm(batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Baddbmm(batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBaddbmm_(batch1 *Tensor, batch2 *Tensor)() { + + err := ts.Baddbmm_(batch1, batch2) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBaddbmmOut(out *Tensor, batch1 *Tensor, batch2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BaddbmmOut(out, batch1, batch2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBartlettWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := BartlettWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func 
MustBartlettWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := BartlettWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, training bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) { + + retVal, err := BatchNorm(input, weight, bias, runningMean, runningVar, training, momentum, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNormBackwardElemt(gradOut *Tensor, input *Tensor, mean *Tensor, invstd *Tensor, weight *Tensor, meanDy *Tensor, meanDyXmu *Tensor)(retVal *Tensor) { + + retVal, err := BatchNormBackwardElemt(gradOut, input, mean, invstd, weight, meanDy, meanDyXmu) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNormElemt(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) { + + retVal, err := BatchNormElemt(input, weight, bias, mean, invstd, eps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBatchNormElemtOut(out *Tensor, input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, invstd *Tensor, eps float64)(retVal *Tensor) { + + retVal, err := BatchNormElemtOut(out, input, weight, bias, mean, invstd, eps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBernoulli(del bool)(retVal *Tensor) { + + retVal, err := ts.Bernoulli(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBernoulli1(p float64, del bool)(retVal *Tensor) { + + retVal, err := ts.Bernoulli1(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBernoulli_(p *Tensor)() { + + err := ts.Bernoulli_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBernoulli1_(p float64)() { + + err := ts.Bernoulli1_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBernoulliOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BernoulliOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBilinear(input1 *Tensor, input2 *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := Bilinear(input1, input2, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBinaryCrossEntropy(target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropy(target, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBinaryCrossEntropyBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyBackward(gradOutput, target, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBinaryCrossEntropyBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyBackwardOut(gradInput, gradOutput, target, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBinaryCrossEntropyOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyOut(out, target, 
weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBinaryCrossEntropyWithLogits(target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyWithLogits(target, weight, posWeight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBinaryCrossEntropyWithLogitsBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, posWeight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.BinaryCrossEntropyWithLogitsBackward(gradOutput, target, weight, posWeight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBincount(weights *Tensor, minlength int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Bincount(weights, minlength, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBinomial(count *Tensor, prob *Tensor)(retVal *Tensor) { + + retVal, err := Binomial(count, prob) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseAnd(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseAnd(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseAnd1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseAnd1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseAnd_(other *Scalar)() { + + err := ts.BitwiseAnd_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBitwiseAnd1_(other *Tensor)() { + + err := ts.BitwiseAnd1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBitwiseAndOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseAndOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseAndOut1(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseAndOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseNot(del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseNot(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseNot_()() { + + err := ts.BitwiseNot_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBitwiseNotOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseNotOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseOr(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseOr(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseOr1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseOr1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseOr_(other *Scalar)() { + + err := ts.BitwiseOr_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBitwiseOr1_(other *Tensor)() { + + err := ts.BitwiseOr1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBitwiseOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseOrOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseOrOut1(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := 
ts.BitwiseOrOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseXor(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseXor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseXor1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseXor1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseXor_(other *Scalar)() { + + err := ts.BitwiseXor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBitwiseXor1_(other *Tensor)() { + + err := ts.BitwiseXor1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustBitwiseXorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseXorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBitwiseXorOut1(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.BitwiseXorOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBlackmanWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := BlackmanWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := BlackmanWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBlockDiag(tensors []Tensor)(retVal *Tensor) { + + retVal, err := BlockDiag(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBmm(mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Bmm(mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBmmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.BmmOut(out, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBucketize(boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Bucketize(boundaries, outInt32, right, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustBucketize1(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool)(retVal *Tensor) { + + retVal, err := Bucketize1(selfScalar, boundaries, outInt32, right) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustBucketizeOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) { + + retVal, err := ts.BucketizeOut(out, boundaries, outInt32, right, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCartesianProd(tensors []Tensor)(retVal *Tensor) { + + retVal, err := CartesianProd(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCat(tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := Cat(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCatOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := CatOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCauchy_(median float64, sigma float64)() { + + err := ts.Cauchy_(median, sigma) + if err != nil { log.Fatal(err) } + + return +} + +func MustCdist(x1 
*Tensor, x2 *Tensor, p float64, computeMode []int64)(retVal *Tensor) { + + retVal, err := Cdist(x1, x2, p, computeMode) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCeil(del bool)(retVal *Tensor) { + + retVal, err := ts.Ceil(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCeil_()() { + + err := ts.Ceil_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustCeilOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.CeilOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCelu(del bool)(retVal *Tensor) { + + retVal, err := ts.Celu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCelu_()() { + + err := ts.Celu_() + if err != nil { log.Fatal(err) } + + return +} + +func MustChainMatmul(matrices []Tensor)(retVal *Tensor) { + + retVal, err := ChainMatmul(matrices) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustChannelShuffle(groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ChannelShuffle(groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCholesky(upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Cholesky(upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCholeskyInverse(upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CholeskyInverse(upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCholeskyInverseOut(out *Tensor, upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CholeskyInverseOut(out, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCholeskyOut(out *Tensor, upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CholeskyOut(out, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCholeskySolve(input2 *Tensor, upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CholeskySolve(input2, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCholeskySolveOut(out *Tensor, input2 *Tensor, upper bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CholeskySolveOut(out, input2, upper, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClamp(min *Scalar, max *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Clamp(min, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClamp_(min *Scalar, max *Scalar)() { + + err := ts.Clamp_(min, max) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustClampMax(max *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ClampMax(max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClampMax_(max *Scalar)() { + + err := ts.ClampMax_(max) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustClampMaxOut(out *Tensor, max *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ClampMaxOut(out, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClampMin(min *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ClampMin(min, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClampMin_(min *Scalar)() { + + err := ts.ClampMin_(min) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustClampMinOut(out 
*Tensor, min *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ClampMinOut(out, min, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClampOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ClampOut(out, min, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClip(min *Scalar, max *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Clip(min, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustClip_(min *Scalar, max *Scalar)() { + + err := ts.Clip_(min, max) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustClipOut(out *Tensor, min *Scalar, max *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ClipOut(out, min, max, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCoalesce(del bool)(retVal *Tensor) { + + retVal, err := ts.Coalesce(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCol2im(outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Col2im(outputSize, kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCol2imBackward(gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := Col2imBackward(gradOutput, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCol2imBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := Col2imBackwardOut(gradInput, gradOutput, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCol2imOut(out *Tensor, outputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Col2imOut(out, outputSize, kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCombinations(r int64, withReplacement bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Combinations(r, withReplacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustComplex(real *Tensor, imag *Tensor)(retVal *Tensor) { + + retVal, err := Complex(real, imag) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustComplexOut(out *Tensor, real *Tensor, imag *Tensor)(retVal *Tensor) { + + retVal, err := ComplexOut(out, real, imag) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustConj(del bool)(retVal *Tensor) { + + retVal, err := ts.Conj(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustConjOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ConjOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustConstantPadNd(pad []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ConstantPadNd(pad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustContiguous(del bool)(retVal *Tensor) { + + retVal, err := ts.Contiguous(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConv1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation 
[]int64, groups int64)(retVal *Tensor) { + + retVal, err := Conv1d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConv2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) { + + retVal, err := Conv2d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConv3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, groups int64)(retVal *Tensor) { + + retVal, err := Conv3d(input, weight, bias, stride, padding, dilation, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustConvTbc(weight *Tensor, bias *Tensor, pad int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ConvTbc(weight, bias, pad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvTranspose1d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) { + + retVal, err := ConvTranspose1d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvTranspose2d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) { + + retVal, err := ConvTranspose2d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvTranspose3d(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, groups int64, dilation []int64)(retVal *Tensor) { + + retVal, err := ConvTranspose3d(input, weight, bias, stride, padding, outputPadding, groups, dilation) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) { + + retVal, err := Convolution(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustConvolutionOverrideable(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64)(retVal *Tensor) { + + retVal, err := ConvolutionOverrideable(input, weight, bias, stride, padding, dilation, transposed, outputPadding, groups) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCopySparseToSparse_(src *Tensor, nonBlocking bool)() { + + err := ts.CopySparseToSparse_(src, nonBlocking) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustCos(del bool)(retVal *Tensor) { + + retVal, err := ts.Cos(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCos_()() { + + err := ts.Cos_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustCosOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.CosOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCosh(del bool)(retVal *Tensor) { + + retVal, err := ts.Cosh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCosh_()() { + + err := ts.Cosh_() + if err != nil { 
log.Fatal(err) } + + return +} + +func(ts *Tensor) MustCoshOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.CoshOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCosineEmbeddingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) { + + retVal, err := CosineEmbeddingLoss(input1, input2, target, margin, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64)(retVal *Tensor) { + + retVal, err := CosineSimilarity(x1, x2, dim, eps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCountNonzero(dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.CountNonzero(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCountNonzero1(dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.CountNonzero1(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCross(other *Tensor, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Cross(other, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCrossOut(out *Tensor, other *Tensor, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.CrossOut(out, other, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCtcLoss(logProbs *Tensor, targets *Tensor, inputLengths []int64, targetLengths []int64, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) { + + retVal, err := CtcLoss(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCtcLoss1(logProbs *Tensor, targets *Tensor, inputLengths *Tensor, targetLengths *Tensor, blank int64, reduction int64, zeroInfinity bool)(retVal *Tensor) { + + retVal, err := CtcLoss1(logProbs, targets, inputLengths, targetLengths, blank, reduction, zeroInfinity) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnAffineGridGenerator(theta *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) { + + retVal, err := CudnnAffineGridGenerator(theta, n, c, h, w) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnAffineGridGeneratorBackward(grad *Tensor, n int64, c int64, h int64, w int64)(retVal *Tensor) { + + retVal, err := CudnnAffineGridGeneratorBackward(grad, n, c, h, w) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolution(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolution(weight, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolution1(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolution1(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolution2(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolution2(weight, padding, 
stride, dilation, groups, benchmark, deterministic, allowTf32, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool)(retVal *Tensor) { + + retVal, err := CudnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolutionTranspose(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionTranspose(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolutionTranspose1(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionTranspose1(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolutionTranspose2(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionTranspose2(weight, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, allowTf32, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool)(retVal *Tensor) { + + retVal, err := CudnnConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic, allowTf32) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, allowTf32, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCudnnGridSampler(grid *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.CudnnGridSampler(grid, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := CummaxminBackward(grad, input, indices, dim) + if 
err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumprod(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Cumprod(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustCumprodBackward(grad *Tensor, input *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := CumprodBackward(grad, input, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.CumprodOut(out, dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumsum(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Cumsum(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustCumsumOut(out *Tensor, dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.CumsumOut(out, dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustData(del bool)(retVal *Tensor) { + + retVal, err := ts.Data(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDeg2rad(del bool)(retVal *Tensor) { + + retVal, err := ts.Deg2rad(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDeg2rad_()() { + + err := ts.Deg2rad_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDeg2radOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Deg2radOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDequantize(del bool)(retVal *Tensor) { + + retVal, err := ts.Dequantize(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDet(del bool)(retVal *Tensor) { + + retVal, err := ts.Det(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDetach(del bool)(retVal *Tensor) { + + retVal, err := ts.Detach(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDetach_()() { + + err := ts.Detach_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDiag(diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Diag(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDiagBackward(grad *Tensor, inputSizes []int64, diagonal int64)(retVal *Tensor) { + + retVal, err := DiagBackward(grad, inputSizes, diagonal) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagEmbed(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.DiagEmbed(offset, dim1, dim2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.DiagOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagflat(offset int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Diagflat(offset, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiagonal(offset int64, dim1 int64, dim2 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Diagonal(offset, dim1, dim2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDiagonalBackward(grad *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64)(retVal *Tensor) { + + retVal, err := DiagonalBackward(grad, inputSizes, 
offset, dim1, dim2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDigamma(del bool)(retVal *Tensor) { + + retVal, err := ts.Digamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDigamma_()() { + + err := ts.Digamma_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDigammaOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DigammaOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDist(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Dist(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiv(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Div(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiv1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Div1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDiv_(other *Tensor)() { + + err := ts.Div_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDiv1_(other *Scalar)() { + + err := ts.Div1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DivOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivide(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Divide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivide1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Divide1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDivide_(other *Tensor)() { + + err := ts.Divide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivide1_(other *Scalar)() { + + err := ts.Divide1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDot(tensor *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Dot(tensor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDotOut(out *Tensor, tensor *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.DotOut(out, tensor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDropout(input *Tensor, p float64, train bool)(retVal *Tensor) { + + retVal, err := Dropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustDropout_(p float64, train bool)() { + + err := ts.Dropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func MustDstack(tensors []Tensor)(retVal *Tensor) { + + retVal, err := Dstack(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustDstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) { + + retVal, err := DstackOut(out, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEinsum(equation string, tensors []Tensor)(retVal *Tensor) { + + retVal, err := Einsum(equation, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustElu(del bool)(retVal *Tensor) { + + retVal, err := ts.Elu(del) + if err != nil { log.Fatal(err) } + + return retVal 
+} + +func(ts *Tensor) MustElu_()() { + + err := ts.Elu_() + if err != nil { log.Fatal(err) } + + return +} + +func MustEluBackward(gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor)(retVal *Tensor) { + + retVal, err := EluBackward(gradOutput, alpha, scale, inputScale, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEluBackwardOut(gradInput *Tensor, gradOutput *Tensor, alpha *Scalar, scale *Scalar, inputScale *Scalar, output *Tensor)(retVal *Tensor) { + + retVal, err := EluBackwardOut(gradInput, gradOutput, alpha, scale, inputScale, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEluOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.EluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbedding(weight *Tensor, indices *Tensor, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) { + + retVal, err := Embedding(weight, indices, paddingIdx, scaleGradByFreq, sparse) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbeddingBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool, sparse bool)(retVal *Tensor) { + + retVal, err := EmbeddingBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq, sparse) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmbeddingDenseBackward(gradOutput *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) { + + retVal, err := EmbeddingDenseBackward(gradOutput, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEmbeddingRenorm_(indices *Tensor, maxNorm float64, normType float64)() { + + err := ts.EmbeddingRenorm_(indices, maxNorm, normType) + if err != nil { log.Fatal(err) } + + return +} + +func MustEmbeddingSparseBackward(grad *Tensor, indices *Tensor, numWeights int64, paddingIdx int64, scaleGradByFreq bool)(retVal *Tensor) { + + retVal, err := EmbeddingSparseBackward(grad, indices, numWeights, paddingIdx, scaleGradByFreq) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Empty(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEmptyLike(del bool)(retVal *Tensor) { + + retVal, err := ts.EmptyLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyMeta(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := EmptyMeta(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyOut(out *Tensor, size []int64)(retVal *Tensor) { + + retVal, err := EmptyOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyQuantized(size []int64, qtensor *Tensor)(retVal *Tensor) { + + retVal, err := EmptyQuantized(size, qtensor) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := EmptyStrided(size, stride, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEq(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Eq(other, del) + if err != nil { log.Fatal(err) } 
+ + return retVal +} + +func(ts *Tensor) MustEq1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Eq1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEq_(other *Scalar)() { + + err := ts.Eq_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustEq1_(other *Tensor)() { + + err := ts.Eq1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustEqOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.EqOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustEqOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.EqOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErf(del bool)(retVal *Tensor) { + + retVal, err := ts.Erf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErf_()() { + + err := ts.Erf_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustErfOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ErfOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErfc(del bool)(retVal *Tensor) { + + retVal, err := ts.Erfc(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErfc_()() { + + err := ts.Erfc_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustErfcOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ErfcOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErfinv(del bool)(retVal *Tensor) { + + retVal, err := ts.Erfinv(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustErfinv_()() { + + err := ts.Erfinv_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustErfinvOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ErfinvOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp(del bool)(retVal *Tensor) { + + retVal, err := ts.Exp(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp2(del bool)(retVal *Tensor) { + + retVal, err := ts.Exp2(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp2_()() { + + err := ts.Exp2_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustExp2Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Exp2Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExp_()() { + + err := ts.Exp_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustExpOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ExpOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpand(size []int64, implicit bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Expand(size, implicit, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpandAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ExpandAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpm1(del bool)(retVal *Tensor) { + + retVal, err := ts.Expm1(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExpm1_()() { + + err := ts.Expm1_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts 
*Tensor) MustExpm1Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Expm1Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustExponential_(lambd float64)() { + + err := ts.Exponential_(lambd) + if err != nil { log.Fatal(err) } + + return +} + +func MustEye(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Eye(n, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEye1(n int64, m int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Eye1(n, m, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEyeOut(out *Tensor, n int64)(retVal *Tensor) { + + retVal, err := EyeOut(out, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustEyeOut1(out *Tensor, n int64, m int64)(retVal *Tensor) { + + retVal, err := EyeOut1(out, n, m) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerChannelAffine(scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerChannelAffineBackward(grad *Tensor, scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerChannelAffineBackward(grad, scale, zeroPoint, axis, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerTensorAffine(scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffine(scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFakeQuantizePerTensorAffineBackward(grad *Tensor, scale float64, zeroPoint int64, quantMin int64, quantMax int64, del bool)(retVal *Tensor) { + + retVal, err := ts.FakeQuantizePerTensorAffineBackward(grad, scale, zeroPoint, quantMin, quantMax, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearFp16Weight(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearFp16Weight(input, packedWeight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearFp16WeightFp32Activation(input *Tensor, packedWeight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearFp16WeightFp32Activation(input, packedWeight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearInt8Weight(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearInt8Weight(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmLinearInt8WeightFp32Activation(input *Tensor, weight *Tensor, packed *Tensor, colOffsets *Tensor, weightScale *Scalar, weightZeroPoint *Scalar, bias *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmLinearInt8WeightFp32Activation(input, weight, packed, colOffsets, weightScale, weightZeroPoint, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func 
MustFbgemmPackGemmMatrixFp16(input *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmPackGemmMatrixFp16(input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackQuantizedMatrix(input *Tensor)(retVal *Tensor) { + + retVal, err := FbgemmPackQuantizedMatrix(input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFbgemmPackQuantizedMatrix1(input *Tensor, k int64, n int64)(retVal *Tensor) { + + retVal, err := FbgemmPackQuantizedMatrix1(input, k, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFeatureAlphaDropout(input *Tensor, p float64, train bool)(retVal *Tensor) { + + retVal, err := FeatureAlphaDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFeatureAlphaDropout_(p float64, train bool)() { + + err := ts.FeatureAlphaDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func MustFeatureDropout(input *Tensor, p float64, train bool)(retVal *Tensor) { + + retVal, err := FeatureDropout(input, p, train) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFeatureDropout_(p float64, train bool)() { + + err := ts.FeatureDropout_(p, train) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFft(signalNdim int64, normalized bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Fft(signalNdim, normalized, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftFftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftFftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftHfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftHfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIhfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIhfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIrfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftIrfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftIrfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfft(n []int64, dim int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfft(n, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFftRfftn(s []int64, dim []int64, norm string, del bool)(retVal *Tensor) { + + retVal, err := ts.FftRfftn(s, dim, norm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFill_(value *Scalar)() { + + err := 
ts.Fill_(value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFill1_(value *Tensor)() { + + err := ts.Fill1_(value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFillDiagonal_(fillValue *Scalar, wrap bool)() { + + err := ts.FillDiagonal_(fillValue, wrap) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFix(del bool)(retVal *Tensor) { + + retVal, err := ts.Fix(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFix_()() { + + err := ts.Fix_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFixOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FixOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFlatten(startDim int64, endDim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Flatten(startDim, endDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFlip(dims []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Flip(dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFliplr(del bool)(retVal *Tensor) { + + retVal, err := ts.Fliplr(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFlipud(del bool)(retVal *Tensor) { + + retVal, err := ts.Flipud(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloor(del bool)(retVal *Tensor) { + + retVal, err := ts.Floor(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloor_()() { + + err := ts.Floor_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFloorDivide(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorDivide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloorDivide1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorDivide1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloorDivide_(other *Tensor)() { + + err := ts.FloorDivide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFloorDivide1_(other *Scalar)() { + + err := ts.FloorDivide1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFloorDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorDivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFloorOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FloorOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmod(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Fmod(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmod1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Fmod1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmod_(other *Scalar)() { + + err := ts.Fmod_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFmod1_(other *Tensor)() { + + err := ts.Fmod1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFmodOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FmodOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFmodOut1(out *Tensor, other *Tensor, del 
bool)(retVal *Tensor) { + + retVal, err := ts.FmodOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrac(del bool)(retVal *Tensor) { + + retVal, err := ts.Frac(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrac_()() { + + err := ts.Frac_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustFracOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FracOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool2dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool2dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool3dBackward(gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackward(gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFractionalMaxPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, outputSize []int64, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.FractionalMaxPool3dBackwardOut(gradInput, gradOutput, kernelSize, outputSize, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrobeniusNorm(del bool)(retVal *Tensor) { + + retVal, err := ts.FrobeniusNorm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrobeniusNorm1(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.FrobeniusNorm1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.FrobeniusNormOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := FromFile(filename, shared, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Full(size, fillValue, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustFullLike(fillValue *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.FullLike(fillValue, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustFullOut(out *Tensor, size []int64, fillValue *Scalar)(retVal *Tensor) { + + retVal, err := FullOut(out, size, fillValue) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGather(dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Gather(dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } 
+ + return retVal +} + +func(ts *Tensor) MustGatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) { + + retVal, err := ts.GatherBackward(grad, dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool)(retVal *Tensor) { + + retVal, err := ts.GatherOut(out, dim, index, sparseGrad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGcd(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Gcd(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGcd_(other *Tensor)() { + + err := ts.Gcd_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGcdOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GcdOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGe(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Ge(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGe1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Ge1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGe_(other *Scalar)() { + + err := ts.Ge_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGe1_(other *Tensor)() { + + err := ts.Ge1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGeOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GeOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GeOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGelu(del bool)(retVal *Tensor) { + + retVal, err := ts.Gelu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeluBackward(grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GeluBackward(grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGeometric_(p float64)() { + + err := ts.Geometric_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGer(vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Ger(vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGerOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GerOut(out, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGlu(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Glu(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGluBackward(gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.GluBackward(gradOutput, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGluBackwardOut(gradInput *Tensor, gradOutput *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.GluBackwardOut(gradInput, gradOutput, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGluOut(out *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.GluOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustGrad(del bool)(retVal *Tensor) { + + retVal, err := ts.Grad(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreater(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Greater(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreater1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Greater1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreater_(other *Scalar)() { + + err := ts.Greater_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreater1_(other *Tensor)() { + + err := ts.Greater1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreaterEqual(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqual(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterEqual1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqual1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterEqual_(other *Scalar)() { + + err := ts.GreaterEqual_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreaterEqual1_(other *Tensor)() { + + err := ts.GreaterEqual1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGreaterEqualOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqualOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterEqualOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterEqualOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGreaterOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GreaterOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := GridSampler(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler2d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := GridSampler2d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGridSampler3d(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool)(retVal *Tensor) { + + retVal, err := GridSampler3d(input, grid, interpolationMode, paddingMode, alignCorners) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGroupNorm(input *Tensor, numGroups int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnabled bool)(retVal *Tensor) { + + retVal, err := GroupNorm(input, numGroups, weight, bias, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) { + + retVal, err := GruCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { log.Fatal(err) } + + return retVal +} + 
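Every `Must*` method in this generated file follows the same pattern: it delegates to its fallible counterpart (for example, `MustGt` wraps `Gt`), calls `log.Fatal` on any error, and returns the result directly so call sites can chain tensor operations without error plumbing; the trailing `del` flag is simply forwarded to the fallible method, which uses it to decide whether the receiver tensor is dropped after the call. The following is a minimal, self-contained sketch of that wrapper pattern only, using hypothetical `Thing`, `Scale`, and `MustScale` stand-ins rather than the real generated API:

```go
package main

import "log"

// Thing is a hypothetical stand-in for *Tensor, used only to illustrate
// the MustXxx wrapper pattern seen throughout this generated file.
type Thing struct{ v float64 }

// Scale is the fallible variant: it returns a value and an error.
func (t *Thing) Scale(f float64) (*Thing, error) {
	return &Thing{v: t.v * f}, nil
}

// MustScale mirrors the generated Must* wrappers: it delegates to the
// fallible method and terminates the program on error, so callers can
// chain operations without checking errors at every step.
func (t *Thing) MustScale(f float64) *Thing {
	out, err := t.Scale(f)
	if err != nil {
		log.Fatal(err)
	}
	return out
}

func main() {
	x := &Thing{v: 2}
	y := x.MustScale(3).MustScale(0.5) // chained calls, no explicit error handling
	log.Printf("y = %v", y.v)
}
```

Panicking via `log.Fatal` keeps the generated API ergonomic for examples and short scripts; code that needs to recover from libtorch errors can call the fallible variants instead.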
+func(ts *Tensor) MustGt(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Gt(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGt1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Gt1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGt_(other *Scalar)() { + + err := ts.Gt_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGt1_(other *Tensor)() { + + err := ts.Gt1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustGtOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.GtOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustGtOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.GtOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow2(windowLength int64, periodic bool, alpha float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindow2(windowLength, periodic, alpha, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHammingWindow3(windowLength int64, periodic bool, alpha float64, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HammingWindow3(windowLength, periodic, alpha, beta, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHannWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HannWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHannWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := HannWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardshrink(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardshrink(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardshrinkBackward(gradOut *Tensor, lambd *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.HardshrinkBackward(gradOut, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardsigmoid(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardsigmoid(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardsigmoid_()() { + + err := ts.Hardsigmoid_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHardsigmoidBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardsigmoidBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardsigmoidOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err 
:= ts.HardsigmoidOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardswish(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardswish(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardswish_()() { + + err := ts.Hardswish_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHardswishBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardswishBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardswishOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardswishOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanh(del bool)(retVal *Tensor) { + + retVal, err := ts.Hardtanh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanh_()() { + + err := ts.Hardtanh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHardtanhBackward(gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.HardtanhBackward(gradOutput, minVal, maxVal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, minVal *Scalar, maxVal *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.HardtanhBackwardOut(gradInput, gradOutput, minVal, maxVal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHardtanhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HardtanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHeaviside(values *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Heaviside(values, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHeaviside_(values *Tensor)() { + + err := ts.Heaviside_(values) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHeavisideOut(out *Tensor, values *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HeavisideOut(out, values, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.HingeEmbeddingLoss(target, margin, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHistc(bins int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Histc(bins, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHistcOut(out *Tensor, bins int64, del bool)(retVal *Tensor) { + + retVal, err := ts.HistcOut(out, bins, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHspmm(mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) { + + retVal, err := Hspmm(mat1, mat2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor)(retVal *Tensor) { + + retVal, err := HspmmOut(out, mat1, mat2) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHstack(tensors []Tensor)(retVal *Tensor) { + + retVal, err := Hstack(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustHstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) { + + retVal, err := HstackOut(out, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHypot(other *Tensor, del 
bool)(retVal *Tensor) { + + retVal, err := ts.Hypot(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustHypot_(other *Tensor)() { + + err := ts.Hypot_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustHypotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.HypotOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustI0(del bool)(retVal *Tensor) { + + retVal, err := ts.I0(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustI0_()() { + + err := ts.I0_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustI0Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.I0Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIfft(signalNdim int64, normalized bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Ifft(signalNdim, normalized, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIm2col(kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Im2col(kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIm2colBackward(gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := Im2colBackward(gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIm2colBackwardOut(gradInput *Tensor, gradOutput *Tensor, inputSize []int64, kernelSize []int64, dilation []int64, padding []int64, stride []int64)(retVal *Tensor) { + + retVal, err := Im2colBackwardOut(gradInput, gradOutput, inputSize, kernelSize, dilation, padding, stride) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIm2colOut(out *Tensor, kernelSize []int64, dilation []int64, padding []int64, stride []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Im2colOut(out, kernelSize, dilation, padding, stride, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustImag(del bool)(retVal *Tensor) { + + retVal, err := ts.Imag(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndex(indices []Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Index(indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexAdd(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexAdd(dim, index, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexAdd_(dim int64, index *Tensor, source *Tensor)() { + + err := ts.IndexAdd_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexCopy(dim int64, index *Tensor, source *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexCopy(dim, index, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexCopy_(dim int64, index *Tensor, source *Tensor)() { + + err := ts.IndexCopy_(dim, index, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexFill(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexFill(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return 
retVal +} + +func(ts *Tensor) MustIndexFill1(dim int64, index *Tensor, value *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexFill1(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexFill_(dim int64, index *Tensor, value *Scalar)() { + + err := ts.IndexFill_(dim, index, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexFill1_(dim int64, index *Tensor, value *Tensor)() { + + err := ts.IndexFill1_(dim, index, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexPut(indices []Tensor, values *Tensor, accumulate bool, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexPut(indices, values, accumulate, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexPut_(indices []Tensor, values *Tensor, accumulate bool)() { + + err := ts.IndexPut_(indices, values, accumulate) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustIndexSelect(dim int64, index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexSelect(dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustIndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor)(retVal *Tensor) { + + retVal, err := IndexSelectBackward(grad, selfSizes, dim, index) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IndexSelectOut(out, dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIndices(del bool)(retVal *Tensor) { + + retVal, err := ts.Indices(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInfinitelyDifferentiableGeluBackward(grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.InfinitelyDifferentiableGeluBackward(grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustInstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool)(retVal *Tensor) { + + retVal, err := InstanceNorm(input, weight, bias, runningMean, runningVar, useInputStats, momentum, eps, cudnnEnabled) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIntRepr(del bool)(retVal *Tensor) { + + retVal, err := ts.IntRepr(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInverse(del bool)(retVal *Tensor) { + + retVal, err := ts.Inverse(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustInverseOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.InverseOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIrfft(signalNdim int64, normalized bool, onesided bool, signalSizes []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Irfft(signalNdim, normalized, onesided, signalSizes, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsclose(other *Tensor, rtol float64, atol float64, equalNan bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Isclose(other, rtol, atol, equalNan, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsfinite(del bool)(retVal *Tensor) { + + retVal, err := ts.Isfinite(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts 
*Tensor) MustIsinf(del bool)(retVal *Tensor) { + + retVal, err := ts.Isinf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsnan(del bool)(retVal *Tensor) { + + retVal, err := ts.Isnan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsneginf(del bool)(retVal *Tensor) { + + retVal, err := ts.Isneginf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsneginfOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IsneginfOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsposinf(del bool)(retVal *Tensor) { + + retVal, err := ts.Isposinf(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsposinfOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.IsposinfOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIsreal(del bool)(retVal *Tensor) { + + retVal, err := ts.Isreal(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustIstft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Istft(nFft, hopLength, winLength, window, center, normalized, onesided, length, returnComplex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustKaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := KaiserWindow(windowLength, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustKaiserWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := KaiserWindow1(windowLength, periodic, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustKaiserWindow2(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := KaiserWindow2(windowLength, periodic, beta, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustKlDiv(target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor) { + + retVal, err := ts.KlDiv(target, reduction, logTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustKlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool)(retVal *Tensor) { + + retVal, err := ts.KlDivBackward(gradOutput, target, reduction, logTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustL1Loss(target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1Loss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1LossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1LossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.L1LossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Tensor, eps float64, cudnnEnable bool)(retVal *Tensor) { + + retVal, err := LayerNorm(input, normalizedShape, weight, bias, eps, cudnnEnable) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLcm(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Lcm(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLcm_(other *Tensor)() { + + err := ts.Lcm_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLcmOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LcmOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLe(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Le(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLe1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Le1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLe_(other *Scalar)() { + + err := ts.Le_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLe1_(other *Tensor)() { + + err := ts.Le1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLeOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LeOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LeOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeakyRelu(del bool)(retVal *Tensor) { + + retVal, err := ts.LeakyRelu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeakyRelu_()() { + + err := ts.LeakyRelu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLeakyReluBackward(gradOutput *Tensor, negativeSlope *Scalar, selfIsResult bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LeakyReluBackward(gradOutput, negativeSlope, selfIsResult, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLeakyReluOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LeakyReluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerp(end *Tensor, weight *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Lerp(end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerp1(end *Tensor, weight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Lerp1(end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerp_(end *Tensor, weight *Scalar)() { + + err := ts.Lerp_(end, weight) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLerp1_(end *Tensor, weight *Tensor)() { + + err := ts.Lerp1_(end, weight) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLerpOut(out *Tensor, end *Tensor, weight *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LerpOut(out, end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLerpOut1(out 
*Tensor, end *Tensor, weight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LerpOut1(out, end, weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLess(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Less(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLess1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Less1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLess_(other *Scalar)() { + + err := ts.Less_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLess1_(other *Tensor)() { + + err := ts.Less1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLessEqual(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqual(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessEqual1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqual1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessEqual_(other *Scalar)() { + + err := ts.LessEqual_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLessEqual1_(other *Tensor)() { + + err := ts.LessEqual1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLessEqualOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqualOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessEqualOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LessEqualOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LessOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLessOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LessOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLgamma(del bool)(retVal *Tensor) { + + retVal, err := ts.Lgamma(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLgamma_()() { + + err := ts.Lgamma_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLgammaOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LgammaOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgDet(del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgDet(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNorm(ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgNorm1(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNorm1(ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNormOut(out, ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustLinalgNormOut1(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LinalgNormOut1(out, ord, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := Linear(input, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinspace(start *Scalar, end *Scalar, steps []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Linspace(start, end, steps, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64)(retVal *Tensor) { + + retVal, err := LinspaceOut(out, start, end, steps) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog(del bool)(retVal *Tensor) { + + retVal, err := ts.Log(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog10(del bool)(retVal *Tensor) { + + retVal, err := ts.Log10(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog10_()() { + + err := ts.Log10_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLog10Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Log10Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog1p(del bool)(retVal *Tensor) { + + retVal, err := ts.Log1p(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog1p_()() { + + err := ts.Log1p_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLog1pOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Log1pOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog2(del bool)(retVal *Tensor) { + + retVal, err := ts.Log2(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog2_()() { + + err := ts.Log2_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLog2Out(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Log2Out(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLog_()() { + + err := ts.Log_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLogNormal_(mean float64, std float64)() { + + err := ts.LogNormal_(mean, std) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLogOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogSigmoid(del bool)(retVal *Tensor) { + + retVal, err := ts.LogSigmoid(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogSigmoidBackward(gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogSigmoidBackward(gradOutput, buffer, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogSigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, buffer *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogSigmoidBackwardOut(gradInput, gradOutput, buffer, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogSigmoidOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogSigmoidOut(out, del) + if err != nil { 
log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.LogSoftmax(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogaddexp(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Logaddexp(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogaddexp2(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Logaddexp2(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogaddexp2Out(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Logaddexp2Out(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogaddexpOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogaddexpOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogcumsumexp(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Logcumsumexp(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogcumsumexpOut(out *Tensor, dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.LogcumsumexpOut(out, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogdet(del bool)(retVal *Tensor) { + + retVal, err := ts.Logdet(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalAnd(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalAnd(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalAnd_(other *Tensor)() { + + err := ts.LogicalAnd_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLogicalAndOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalAndOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalNot(del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalNot(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalNot_()() { + + err := ts.LogicalNot_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLogicalNotOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalNotOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalOr(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalOr(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalOr_(other *Tensor)() { + + err := ts.LogicalOr_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLogicalOrOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalOrOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalXor(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalXor(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogicalXor_(other *Tensor)() { + + err := ts.LogicalXor_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLogicalXorOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LogicalXorOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustLogit(eps []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.Logit(eps, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogit_(eps []float64)() { + + err := ts.Logit_(eps) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLogitBackward(gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.LogitBackward(gradOutput, eps, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogitBackwardOut(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.LogitBackwardOut(gradInput, gradOutput, eps, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogitOut(out *Tensor, eps []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.LogitOut(out, eps, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLogspace(start *Scalar, end *Scalar, steps []int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Logspace(start, end, steps, base, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustLogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64, base float64)(retVal *Tensor) { + + retVal, err := LogspaceOut(out, start, end, steps, base) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogsumexp(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Logsumexp(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLogsumexpOut(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.LogsumexpOut(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLt(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Lt(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLt1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Lt1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLt_(other *Scalar)() { + + err := ts.Lt_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLt1_(other *Tensor)() { + + err := ts.Lt1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustLtOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.LtOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLtOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LtOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLuSolve(lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LuSolve(lUData, lUPivots, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustLuSolveOut(out *Tensor, lUData *Tensor, lUPivots *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.LuSolveOut(out, lUData, lUPivots, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMarginRankingLoss(input1 *Tensor, input2 *Tensor, target *Tensor, margin float64, reduction int64)(retVal *Tensor) { + + retVal, err := MarginRankingLoss(input1, input2, target, margin, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaskedFill(mask *Tensor, 
value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.MaskedFill(mask, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaskedFill1(mask *Tensor, value *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaskedFill1(mask, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaskedFill_(mask *Tensor, value *Scalar)() { + + err := ts.MaskedFill_(mask, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMaskedFill1_(mask *Tensor, value *Tensor)() { + + err := ts.MaskedFill1_(mask, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMaskedScatter(mask *Tensor, source *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaskedScatter(mask, source, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaskedScatter_(mask *Tensor, source *Tensor)() { + + err := ts.MaskedScatter_(mask, source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMaskedSelect(mask *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaskedSelect(mask, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor)(retVal *Tensor) { + + retVal, err := MaskedSelectBackward(grad, input, mask) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaskedSelectOut(out *Tensor, mask *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaskedSelectOut(out, mask, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatmul(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Matmul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatmulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MatmulOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatrixExp(del bool)(retVal *Tensor) { + + retVal, err := ts.MatrixExp(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatrixExpBackward(grad *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MatrixExpBackward(grad, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatrixPower(n int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MatrixPower(n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatrixRank(symmetric bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MatrixRank(symmetric, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMatrixRank1(tol float64, symmetric bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MatrixRank1(tol, symmetric, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMax(del bool)(retVal *Tensor) { + + retVal, err := ts.Max(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMax1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Max1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + 
+ retVal, err := ts.MaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool2dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool2dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool2dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool3dWithIndicesBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackward(gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxPool3dWithIndicesBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, indices *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxPool3dWithIndicesBackwardOut(gradInput, gradOutput, kernelSize, stride, padding, dilation, ceilMode, indices, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool2d(indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2d(indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool2dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dBackward(gradOutput, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dBackwardOut(gradInput, gradOutput, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool2dOut(out *Tensor, indices *Tensor, outputSize []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool2dOut(out, indices, outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool3d(indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3d(indices, outputSize, 
stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool3dBackward(gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dBackward(gradOutput, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dBackwardOut(gradInput, gradOutput, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int64, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MaxUnpool3dOut(out, indices, outputSize, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaximum(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Maximum(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMaximumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MaximumOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMean(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Mean(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMean1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Mean1(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMeanOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.MeanOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMedian(del bool)(retVal *Tensor) { + + retVal, err := ts.Median(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMin(del bool)(retVal *Tensor) { + + retVal, err := ts.Min(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMin1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Min1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMinOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MinOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMinimum(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Minimum(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMinimumOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MinimumOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenConvolutionBackwardBias(gradOutput *Tensor)(retVal *Tensor) { + + retVal, err := 
MiopenConvolutionBackwardBias(gradOutput) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal *Tensor) { + + retVal, err := MiopenConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenConvolutionTranspose(weight *Tensor, bias *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenConvolutionTranspose(weight, bias, padding, outputPadding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal *Tensor) { + + retVal, err := MiopenConvolutionTransposeBackwardInput(gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenConvolutionTransposeBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenDepthwiseConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenDepthwiseConvolution(weight, bias, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMiopenDepthwiseConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool)(retVal *Tensor) { + + retVal, err := MiopenDepthwiseConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, benchmark, deterministic) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMiopenDepthwiseConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MiopenDepthwiseConvolutionBackwardWeight(weightSize, gradOutput, padding, stride, dilation, groups, benchmark, deterministic, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnAdaptiveAvgPool2d(outputSize []int64, 
del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnAdaptiveAvgPool2d(outputSize, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnConvolution(weight *Tensor, bias *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnConvolution(weight, bias, padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, biasDefined bool)(retVal *Tensor) { + + retVal, err := MkldnnConvolutionBackwardInput(selfSize, gradOutput, weight, padding, stride, dilation, groups, biasDefined) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustMkldnnLinear(input *Tensor, weight *Tensor, bias *Tensor)(retVal *Tensor) { + + retVal, err := MkldnnLinear(input, weight, bias) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnMaxPool3d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnReorderConv2dWeight(padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MkldnnReorderConv3dWeight(padding, stride, dilation, groups, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMm(mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Mm(mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMmOut(out *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MmOut(out, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMovedim(source []int64, destination []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Movedim(source, destination, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMovedim1(source int64, destination int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Movedim1(source, destination, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MseLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MseLossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del 
bool)(retVal *Tensor) { + + retVal, err := ts.MseLossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMseLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MseLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMul(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Mul(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMul1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Mul1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMul_(other *Tensor)() { + + err := ts.Mul_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMul1_(other *Scalar)() { + + err := ts.Mul1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMulOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MulOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiMarginLossBackward(gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultiMarginLossBackward(gradOutput, target, p, margin, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, p *Scalar, margin *Scalar, weight *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultiMarginLossBackwardOut(gradInput, gradOutput, target, p, margin, weight, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossBackward(gradOutput, target, reduction, isTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, isTarget *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossBackwardOut(gradInput, gradOutput, target, reduction, isTarget, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultilabelMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.MultilabelMarginLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultinomial(numSamples int64, replacement bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Multinomial(numSamples, replacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultinomialOut(out *Tensor, numSamples int64, replacement bool, del bool)(retVal *Tensor) { + + retVal, err := ts.MultinomialOut(out, numSamples, replacement, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiply(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := 
ts.Multiply(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiply1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Multiply1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMultiply_(other *Tensor)() { + + err := ts.Multiply_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMultiply1_(other *Scalar)() { + + err := ts.Multiply1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustMultiplyOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MultiplyOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMv(vec *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Mv(vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMvOut(out *Tensor, vec *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.MvOut(out, vec, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMvlgamma(p int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Mvlgamma(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustMvlgamma_(p int64)() { + + err := ts.Mvlgamma_(p) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNanquantile(q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Nanquantile(q, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanquantile1(q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Nanquantile1(q, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanquantileOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NanquantileOut(out, q, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNanquantileOut1(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NanquantileOut1(out, q, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNansum(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Nansum(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNansum1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Nansum1(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNansumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NansumOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNarrow(dim int64, start int64, length int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Narrow(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNarrow1(dim int64, start *Tensor, length int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Narrow1(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNarrowCopy(dim int64, start int64, length int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NarrowCopy(dim, start, length, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustNativeNorm(del bool)(retVal *Tensor) { + + retVal, err := ts.NativeNorm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNativeNorm1(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NativeNorm1(p, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNe(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Ne(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNe1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Ne1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNe_(other *Scalar)() { + + err := ts.Ne_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNe1_(other *Tensor)() { + + err := ts.Ne1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNeOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.NeOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNeOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NeOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNeg(del bool)(retVal *Tensor) { + + retVal, err := ts.Neg(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNeg_()() { + + err := ts.Neg_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNegOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NegOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNegative(del bool)(retVal *Tensor) { + + retVal, err := ts.Negative(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNegative_()() { + + err := ts.Negative_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNegativeOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NegativeOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewEmpty(size, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewFull(size []int64, fillValue *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewFull(size, fillValue, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNewZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.NewZeros(size, optionsKind, optionsDevice, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNextafter(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Nextafter(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNextafter_(other *Tensor)() { + + err := ts.Nextafter_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNextafterOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NextafterOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss(target *Tensor, weight *Tensor, 
reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss(target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2d(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2d(target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2dBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2dBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2dBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLoss2dOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLoss2dOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLossBackward(gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLossBackward(gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, totalWeight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLossBackwardOut(gradInput, gradOutput, target, weight, reduction, ignoreIndex, totalWeight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNllLossOut(out *Tensor, target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool)(retVal *Tensor) { + + retVal, err := ts.NllLossOut(out, target, weight, reduction, ignoreIndex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNonzero(del bool)(retVal *Tensor) { + + retVal, err := ts.Nonzero(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNonzeroOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NonzeroOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNorm(del bool)(retVal *Tensor) { + + retVal, err := ts.Norm(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNorm1(p *Scalar, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Norm1(p, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNorm2(p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Norm2(p, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNorm3(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Norm3(p, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return 
retVal +} + +func MustNormExceptDim(v *Tensor, pow int64, dim int64)(retVal *Tensor) { + + retVal, err := NormExceptDim(v, pow, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormOut(out *Tensor, p *Scalar, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NormOut(out, p, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormOut1(out *Tensor, p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.NormOut1(out, p, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNormal_(mean float64, std float64)() { + + err := ts.Normal_(mean, std) + if err != nil { log.Fatal(err) } + + return +} + +func MustNormalOut(out *Tensor, mean *Tensor, std float64)(retVal *Tensor) { + + retVal, err := NormalOut(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalOut1(out *Tensor, mean float64, std *Tensor)(retVal *Tensor) { + + retVal, err := NormalOut1(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalOut2(out *Tensor, mean *Tensor, std *Tensor)(retVal *Tensor) { + + retVal, err := NormalOut2(out, mean, std) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustNormalOut3(out *Tensor, mean float64, std float64, size []int64)(retVal *Tensor) { + + retVal, err := NormalOut3(out, mean, std, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqual(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqual(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqual1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqual1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqual_(other *Scalar)() { + + err := ts.NotEqual_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNotEqual1_(other *Tensor)() { + + err := ts.NotEqual1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustNotEqualOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqualOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNotEqualOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.NotEqualOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNorm(keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNorm(keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNorm1(dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNorm1(dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNormOut(out *Tensor, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNormOut(out, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNuclearNormOut1(out *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.NuclearNormOut1(out, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustNumpyT(del bool)(retVal *Tensor) { + + retVal, err := ts.NumpyT(del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
+func(ts *Tensor) MustOneHot(numClasses int64, del bool)(retVal *Tensor) { + + retVal, err := ts.OneHot(numClasses, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustOnes(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Ones(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOnesLike(del bool)(retVal *Tensor) { + + retVal, err := ts.OnesLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustOnesOut(out *Tensor, size []int64)(retVal *Tensor) { + + retVal, err := OnesOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrgqr(input2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Orgqr(input2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrgqrOut(out *Tensor, input2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.OrgqrOut(out, input2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrmqr(input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Ormqr(input2, input3, left, transpose, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left bool, transpose bool, del bool)(retVal *Tensor) { + + retVal, err := ts.OrmqrOut(out, input2, input3, left, transpose, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOuter(vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Outer(vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustOuterOut(out *Tensor, vec2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.OuterOut(out, vec2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool)(retVal *Tensor) { + + retVal, err := PairwiseDistance(x1, x2, p, eps, keepdim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPdist(p float64, del bool)(retVal *Tensor) { + + retVal, err := ts.Pdist(p, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPermute(dims []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Permute(dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPinMemory(del bool)(retVal *Tensor) { + + retVal, err := ts.PinMemory(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPinverse(rcond float64, del bool)(retVal *Tensor) { + + retVal, err := ts.Pinverse(rcond, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPixelShuffle(upscaleFactor int64, del bool)(retVal *Tensor) { + + retVal, err := ts.PixelShuffle(upscaleFactor, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPoisson(del bool)(retVal *Tensor) { + + retVal, err := ts.Poisson(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps float64, reduction int64)(retVal *Tensor) { + + retVal, err := PoissonNllLoss(input, target, logInput, full, eps, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPolar(abs *Tensor, angle *Tensor)(retVal *Tensor) { + + retVal, err := Polar(abs, angle) + if err != nil { 
log.Fatal(err) } + + return retVal +} + +func MustPolarOut(out *Tensor, abs *Tensor, angle *Tensor)(retVal *Tensor) { + + retVal, err := PolarOut(out, abs, angle) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPolygamma(n int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Polygamma(n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPolygamma_(n int64)() { + + err := ts.Polygamma_(n) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustPolygammaOut(out *Tensor, n int64, del bool)(retVal *Tensor) { + + retVal, err := ts.PolygammaOut(out, n, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPow(exponent *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Pow(exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPow1(exponent *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Pow1(exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPow2(selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) { + + retVal, err := Pow2(selfScalar, exponent) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPow_(exponent *Scalar)() { + + err := ts.Pow_(exponent) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustPow1_(exponent *Tensor)() { + + err := ts.Pow1_(exponent) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustPowOut(out *Tensor, exponent *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.PowOut(out, exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPowOut1(out *Tensor, exponent *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.PowOut1(out, exponent, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustPowOut2(out *Tensor, selfScalar *Scalar, exponent *Tensor)(retVal *Tensor) { + + retVal, err := PowOut2(out, selfScalar, exponent) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPrelu(weight *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Prelu(weight, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustProd(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Prod(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustProd1(dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Prod1(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustProdOut(out *Tensor, dim int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.ProdOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustPut_(index *Tensor, source *Tensor, accumulate bool)() { + + err := ts.Put_(index, source, accumulate) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustQPerChannelScales(del bool)(retVal *Tensor) { + + retVal, err := ts.QPerChannelScales(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQPerChannelZeroPoints(del bool)(retVal *Tensor) { + + retVal, err := ts.QPerChannelZeroPoints(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantile(q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Quantile(q, dim, keepdim, del) + if err != nil 
{ log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantile1(q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Quantile1(q, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantileOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.QuantileOut(out, q, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantileOut1(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.QuantileOut1(out, q, dim, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.QuantizePerChannel(scales, zeroPoints, axis, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantizePerTensor(scale float64, zeroPoint int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.QuantizePerTensor(scale, zeroPoint, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedBatchNorm(input *Tensor, weight *Tensor, bias *Tensor, mean *Tensor, vari *Tensor, eps float64, outputScale float64, outputZeroPoint int64)(retVal *Tensor) { + + retVal, err := QuantizedBatchNorm(input, weight, bias, mean, vari, eps, outputScale, outputZeroPoint) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) { + + retVal, err := QuantizedGruCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.QuantizedMaxPool1d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustQuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool)(retVal *Tensor) { + + retVal, err := ts.QuantizedMaxPool2d(kernelSize, stride, padding, dilation, ceilMode, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) { + + retVal, err := QuantizedRnnReluCell(input, hx, wIh, wHh, bIh, bHh, packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustQuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor, packedIh *Tensor, packedHh *Tensor, colOffsetsIh *Tensor, colOffsetsHh *Tensor, scaleIh *Scalar, scaleHh *Scalar, zeroPointIh *Scalar, zeroPointHh *Scalar)(retVal *Tensor) { + + retVal, err := QuantizedRnnTanhCell(input, hx, wIh, wHh, bIh, bHh, 
packedIh, packedHh, colOffsetsIh, colOffsetsHh, scaleIh, scaleHh, zeroPointIh, zeroPointHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRad2deg(del bool)(retVal *Tensor) { + + retVal, err := ts.Rad2deg(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRad2deg_()() { + + err := ts.Rad2deg_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRad2degOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Rad2degOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Rand(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRandLike(del bool)(retVal *Tensor) { + + retVal, err := ts.RandLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandOut(out *Tensor, size []int64)(retVal *Tensor) { + + retVal, err := RandOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandint(high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Randint(high, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandint1(low int64, high int64, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Randint1(low, high, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRandintLike(high int64, del bool)(retVal *Tensor) { + + retVal, err := ts.RandintLike(high, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRandintLike1(low int64, high int64, del bool)(retVal *Tensor) { + + retVal, err := ts.RandintLike1(low, high, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandintOut(out *Tensor, high int64, size []int64)(retVal *Tensor) { + + retVal, err := RandintOut(out, high, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandintOut1(out *Tensor, low int64, high int64, size []int64)(retVal *Tensor) { + + retVal, err := RandintOut1(out, low, high, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandn(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Randn(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRandnLike(del bool)(retVal *Tensor) { + + retVal, err := ts.RandnLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandnOut(out *Tensor, size []int64)(retVal *Tensor) { + + retVal, err := RandnOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRandom_()() { + + err := ts.Random_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRandom1_(to int64)() { + + err := ts.Random1_(to) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRandom2(from int64, to []int64)() { + + err := ts.Random2(from, to) + if err != nil { log.Fatal(err) } + + return +} + +func MustRandperm(n int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Randperm(n, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRandpermOut(out *Tensor, n int64)(retVal 
*Tensor) { + + retVal, err := RandpermOut(out, n) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRange(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Range(start, end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRange1(start *Scalar, end *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Range1(start, end, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRangeOut(out *Tensor, start *Scalar, end *Scalar)(retVal *Tensor) { + + retVal, err := RangeOut(out, start, end) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReal(del bool)(retVal *Tensor) { + + retVal, err := ts.Real(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReciprocal(del bool)(retVal *Tensor) { + + retVal, err := ts.Reciprocal(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReciprocal_()() { + + err := ts.Reciprocal_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustReciprocalOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ReciprocalOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad1d(padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad1d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad1dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad1dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad1dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad2d(padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad2d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad2dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad2dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReflectionPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReflectionPad2dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRelu(del bool)(retVal *Tensor) { + + retVal, err := ts.Relu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRelu_()() { + + err := ts.Relu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRemainder(other *Scalar, del bool)(retVal *Tensor) { + 
+ retVal, err := ts.Remainder(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRemainder1(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Remainder1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRemainder_(other *Scalar)() { + + err := ts.Remainder_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRemainder1_(other *Tensor)() { + + err := ts.Remainder1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRemainderOut(out *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.RemainderOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRemainderOut1(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.RemainderOut1(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRenorm(p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Renorm(p, dim, maxnorm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRenorm_(p *Scalar, dim int64, maxnorm *Scalar)() { + + err := ts.Renorm_(p, dim, maxnorm) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRenormOut(out *Tensor, p *Scalar, dim int64, maxnorm *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.RenormOut(out, p, dim, maxnorm, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRepeat(repeats []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Repeat(repeats, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRepeatInterleave(repeats *Tensor)(retVal *Tensor) { + + retVal, err := RepeatInterleave(repeats) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRepeatInterleave1(repeats *Tensor, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.RepeatInterleave1(repeats, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRepeatInterleave2(repeats int64, dim []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.RepeatInterleave2(repeats, dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad1d(padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad1d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad1dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad1dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad1dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad1dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad1dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad2d(padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad2d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad2dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal 
*Tensor) { + + retVal, err := ts.ReplicationPad2dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad2dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad2dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad2dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad3d(padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad3d(padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad3dBackward(gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad3dBackward(gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad3dBackwardOut(gradInput, gradOutput, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReplicationPad3dOut(out *Tensor, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ReplicationPad3dOut(out, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRequiresGrad_(requiresGrad bool)() { + + err := ts.RequiresGrad_(requiresGrad) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustReshape(shape []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Reshape(shape, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustReshapeAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ReshapeAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustResize_(size []int64)() { + + err := ts.Resize_(size) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustResizeAs_(theTemplate *Tensor)() { + + err := ts.ResizeAs_(theTemplate) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRfft(signalNdim int64, normalized bool, onesided bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Rfft(signalNdim, normalized, onesided, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRnnReluCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) { + + retVal, err := RnnReluCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh *Tensor, bHh *Tensor)(retVal *Tensor) { + + retVal, err := RnnTanhCell(input, hx, wIh, wHh, bIh, bHh) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRoll(shifts []int64, dims []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Roll(shifts, dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRot90(k int64, dims []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Rot90(k, dims, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRound(del bool)(retVal *Tensor) { + + retVal, err := ts.Round(del) + if err != nil { log.Fatal(err) } + + 
return retVal +} + +func(ts *Tensor) MustRound_()() { + + err := ts.Round_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRoundOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.RoundOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRrelu(training bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Rrelu(training, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRrelu_(training bool)() { + + err := ts.Rrelu_(training) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRreluWithNoise(noise *Tensor, training bool, del bool)(retVal *Tensor) { + + retVal, err := ts.RreluWithNoise(noise, training, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRreluWithNoise_(noise *Tensor, training bool)() { + + err := ts.RreluWithNoise_(noise, training) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRreluWithNoiseBackward(gradOutput *Tensor, noise *Tensor, lower *Scalar, upper *Scalar, training bool, selfIsResult bool, del bool)(retVal *Tensor) { + + retVal, err := ts.RreluWithNoiseBackward(gradOutput, noise, lower, upper, training, selfIsResult, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRreluWithNoiseOut(out *Tensor, noise *Tensor, training bool, del bool)(retVal *Tensor) { + + retVal, err := ts.RreluWithNoiseOut(out, noise, training, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRsqrt(del bool)(retVal *Tensor) { + + retVal, err := ts.Rsqrt(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRsqrt_()() { + + err := ts.Rsqrt_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustRsqrtOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.RsqrtOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRsub(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Rsub(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustRsub1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Rsub1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustScalarTensor(s *Scalar, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := ScalarTensor(s, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustScatter(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Scatter(dim, index, src, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustScatter1(dim int64, index *Tensor, value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Scatter1(dim, index, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustScatter_(dim int64, index *Tensor, src *Tensor)() { + + err := ts.Scatter_(dim, index, src) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustScatter1_(dim int64, index *Tensor, value *Scalar)() { + + err := ts.Scatter1_(dim, index, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustScatter2(dim int64, index *Tensor, src *Tensor, reduce string)() { + + err := ts.Scatter2(dim, index, src, reduce) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustScatter3(dim int64, index *Tensor, value 
*Scalar, reduce string)() { + + err := ts.Scatter3(dim, index, value, reduce) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustScatterAdd(dim int64, index *Tensor, src *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ScatterAdd(dim, index, src, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustScatterAdd_(dim int64, index *Tensor, src *Tensor)() { + + err := ts.ScatterAdd_(dim, index, src) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSearchsorted(sortedSequence *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Searchsorted(sortedSequence, outInt32, right, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSearchsorted1(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool)(retVal *Tensor) { + + retVal, err := Searchsorted1(sortedSequence, selfScalar, outInt32, right) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSearchsortedOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, del bool)(retVal *Tensor) { + + retVal, err := ts.SearchsortedOut(out, sortedSequence, outInt32, right, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSelect(dim int64, index int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Select(dim, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSelectBackward(grad *Tensor, inputSizes []int64, dim int64, index int64)(retVal *Tensor) { + + retVal, err := SelectBackward(grad, inputSizes, dim, index) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSelu(del bool)(retVal *Tensor) { + + retVal, err := ts.Selu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSelu_()() { + + err := ts.Selu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSet_()() { + + err := ts.Set_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSet1_(source *Tensor)() { + + err := ts.Set1_(source) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSetRequiresGrad(r bool, del bool)(retVal *Tensor) { + + retVal, err := ts.SetRequiresGrad(r, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSgn(del bool)(retVal *Tensor) { + + retVal, err := ts.Sgn(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSgn_()() { + + err := ts.Sgn_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSgnOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SgnOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSigmoid(del bool)(retVal *Tensor) { + + retVal, err := ts.Sigmoid(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSigmoid_()() { + + err := ts.Sigmoid_() + if err != nil { log.Fatal(err) } + + return +} + +func MustSigmoidBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) { + + retVal, err := SigmoidBackward(gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSigmoidBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) { + + retVal, err := SigmoidBackwardOut(gradInput, gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSigmoidOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := 
ts.SigmoidOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSign(del bool)(retVal *Tensor) { + + retVal, err := ts.Sign(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSign_()() { + + err := ts.Sign_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSignOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SignOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSignbit(del bool)(retVal *Tensor) { + + retVal, err := ts.Signbit(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSignbitOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SignbitOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSilu(del bool)(retVal *Tensor) { + + retVal, err := ts.Silu(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSilu_()() { + + err := ts.Silu_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSiluBackward(gradOutput *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SiluBackward(gradOutput, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSiluOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SiluOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSin(del bool)(retVal *Tensor) { + + retVal, err := ts.Sin(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSin_()() { + + err := ts.Sin_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSinOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SinOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSinh(del bool)(retVal *Tensor) { + + retVal, err := ts.Sinh(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSinh_()() { + + err := ts.Sinh_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSinhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SinhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlice(dim int64, start int64, end int64, step int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Slice(dim, start, end, step, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSliceBackward(grad *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64)(retVal *Tensor) { + + retVal, err := SliceBackward(grad, inputSizes, dim, start, end, step) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SlowConv3d(weight, kernelSize, bias, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConv3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SlowConv3dOut(out, weight, kernelSize, bias, stride, padding, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConvDilated2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := 
ts.SlowConvDilated2d(weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConvDilated3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SlowConvDilated3d(weight, kernelSize, bias, stride, padding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConvTranspose2d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose2d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConvTranspose2dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose2dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConvTranspose3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose3d(weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSlowConvTranspose3dOut(out *Tensor, weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, outputPadding []int64, dilation []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SlowConvTranspose3dOut(out, weight, kernelSize, bias, stride, padding, outputPadding, dilation, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSmm(mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Smm(mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.SmoothL1Loss(target, reduction, beta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.SmoothL1LossBackward(gradOutput, target, reduction, beta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSmoothL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.SmoothL1LossBackwardOut(gradInput, gradOutput, target, reduction, beta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool)(retVal *Tensor) { + + retVal, err := ts.SmoothL1LossOut(out, target, reduction, beta, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftMarginLoss(target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftMarginLoss(target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftMarginLossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del 
bool)(retVal *Tensor) { + + retVal, err := ts.SoftMarginLossBackward(gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftMarginLossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftMarginLossBackwardOut(gradInput, gradOutput, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftMarginLossOut(out *Tensor, target *Tensor, reduction int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftMarginLossOut(out, target, reduction, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftmax(dim int64, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Softmax(dim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftplus(del bool)(retVal *Tensor) { + + retVal, err := ts.Softplus(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftplusBackward(gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftplusBackward(gradOutput, beta, threshold, output, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftplusBackwardOut(gradInput *Tensor, gradOutput *Tensor, beta *Scalar, threshold *Scalar, output *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftplusBackwardOut(gradInput, gradOutput, beta, threshold, output, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftplusOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftplusOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftshrink(del bool)(retVal *Tensor) { + + retVal, err := ts.Softshrink(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftshrinkBackward(gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftshrinkBackward(gradOutput, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftshrinkBackwardOut(gradInput *Tensor, gradOutput *Tensor, lambd *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftshrinkBackwardOut(gradInput, gradOutput, lambd, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSoftshrinkOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SoftshrinkOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSparseCooTensor(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := SparseCooTensor(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSparseCooTensor1(indices *Tensor, values *Tensor, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := SparseCooTensor1(indices, values, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustSparseCooTensor2(indices *Tensor, values *Tensor, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := SparseCooTensor2(indices, values, size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSparseMask(mask *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SparseMask(mask, 
del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSparseResize_(size []int64, sparseDim int64, denseDim int64)() { + + err := ts.SparseResize_(size, sparseDim, denseDim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSparseResizeAndClear_(size []int64, sparseDim int64, denseDim int64)() { + + err := ts.SparseResizeAndClear_(size, sparseDim, denseDim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSqrt(del bool)(retVal *Tensor) { + + retVal, err := ts.Sqrt(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSqrt_()() { + + err := ts.Sqrt_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSqrtOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SqrtOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSquare(del bool)(retVal *Tensor) { + + retVal, err := ts.Square(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSquare_()() { + + err := ts.Square_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSqueeze(del bool)(retVal *Tensor) { + + retVal, err := ts.Squeeze(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSqueeze1(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Squeeze1(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSqueeze_()() { + + err := ts.Squeeze_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSqueeze1_(dim int64)() { + + err := ts.Squeeze1_(dim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSspaddmm(mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Sspaddmm(mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSspaddmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SspaddmmOut(out, mat1, mat2, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustStack(tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := Stack(tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustStackOut(out *Tensor, tensors []Tensor, dim int64)(retVal *Tensor) { + + retVal, err := StackOut(out, tensors, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustStd(unbiased bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Std(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustStd1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Std1(dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustStdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.StdOut(out, dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustStft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Stft(nFft, hopLength, winLength, window, normalized, onesided, returnComplex, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSub(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Sub(other, del) + if err != nil { log.Fatal(err) } + + 
return retVal +} + +func(ts *Tensor) MustSub1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Sub1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSub_(other *Tensor)() { + + err := ts.Sub_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSub1_(other *Scalar)() { + + err := ts.Sub1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSubOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SubOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSubtract(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Subtract(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSubtract1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Subtract1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSubtract_(other *Tensor)() { + + err := ts.Subtract_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSubtract1_(other *Scalar)() { + + err := ts.Subtract1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustSubtractOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.SubtractOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSum(dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Sum(dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSum1(dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Sum1(dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.SumOut(out, dim, keepdim, dtype, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustSumToSize(size []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.SumToSize(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustT(del bool)(retVal *Tensor) { + + retVal, err := ts.T(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustT_()() { + + err := ts.T_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTake(index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Take(index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTakeBackward(grad *Tensor, input *Tensor, index *Tensor)(retVal *Tensor) { + + retVal, err := TakeBackward(grad, input, index) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTakeOut(out *Tensor, index *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TakeOut(out, index, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTan(del bool)(retVal *Tensor) { + + retVal, err := ts.Tan(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTan_()() { + + err := ts.Tan_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTanOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TanOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTanh(del bool)(retVal *Tensor) { + + retVal, err := ts.Tanh(del) + if err != nil { 
log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTanh_()() { + + err := ts.Tanh_() + if err != nil { log.Fatal(err) } + + return +} + +func MustTanhBackward(gradOutput *Tensor, output *Tensor)(retVal *Tensor) { + + retVal, err := TanhBackward(gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTanhBackwardOut(gradInput *Tensor, gradOutput *Tensor, output *Tensor)(retVal *Tensor) { + + retVal, err := TanhBackwardOut(gradInput, gradOutput, output) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTanhOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TanhOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTensordot(other *Tensor, dimsSelf []int64, dimsOther []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Tensordot(other, dimsSelf, dimsOther, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustThreshold(threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Threshold(threshold, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustThreshold_(threshold *Scalar, value *Scalar)() { + + err := ts.Threshold_(threshold, value) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustThresholdBackward(gradOutput *Tensor, threshold *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ThresholdBackward(gradOutput, threshold, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustThresholdOut(out *Tensor, threshold *Scalar, value *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.ThresholdOut(out, threshold, value, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTo(device gotch.Device, del bool)(retVal *Tensor) { + + retVal, err := ts.To(device, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTo1(optionsKind gotch.DType, optionsDevice gotch.Device, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.To1(optionsKind, optionsDevice, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTo2(dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.To2(dtype, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTo3(other *Tensor, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.To3(other, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTo4(device gotch.Device, dtype gotch.DType, nonBlocking bool, copy bool, del bool)(retVal *Tensor) { + + retVal, err := ts.To4(device, dtype, nonBlocking, copy, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToDense(del bool)(retVal *Tensor) { + + retVal, err := ts.ToDense(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustToDenseBackward(grad *Tensor, input *Tensor)(retVal *Tensor) { + + retVal, err := ToDenseBackward(grad, input) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToMkldnn(del bool)(retVal *Tensor) { + + retVal, err := ts.ToMkldnn(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustToMkldnnBackward(grad *Tensor, input *Tensor)(retVal *Tensor) { + + retVal, err := ToMkldnnBackward(grad, input) + if err != nil { log.Fatal(err) 
} + + return retVal +} + +func(ts *Tensor) MustToSparse(del bool)(retVal *Tensor) { + + retVal, err := ts.ToSparse(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustToSparse1(sparseDim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.ToSparse1(sparseDim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTotype(scalarType gotch.DType, del bool)(retVal *Tensor) { + + retVal, err := ts.Totype(scalarType, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrace(del bool)(retVal *Tensor) { + + retVal, err := ts.Trace(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTraceBackward(grad *Tensor, sizes []int64)(retVal *Tensor) { + + retVal, err := TraceBackward(grad, sizes) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTranspose(dim0 int64, dim1 int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Transpose(dim0, dim1, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTranspose_(dim0 int64, dim1 int64)() { + + err := ts.Transpose_(dim0, dim1) + if err != nil { log.Fatal(err) } + + return +} + +func MustTrapz(y *Tensor, x *Tensor, dim int64)(retVal *Tensor) { + + retVal, err := Trapz(y, x, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTrapz1(y *Tensor, dx float64, dim int64)(retVal *Tensor) { + + retVal, err := Trapz1(y, dx, dim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTril(diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Tril(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTril_(diagonal int64)() { + + err := ts.Tril_(diagonal) + if err != nil { log.Fatal(err) } + + return +} + +func MustTrilIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := TrilIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrilOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.TrilOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustTripletMarginLoss(anchor *Tensor, positive *Tensor, negative *Tensor, margin float64, p float64, eps float64, swap bool, reduction int64)(retVal *Tensor) { + + retVal, err := TripletMarginLoss(anchor, positive, negative, margin, p, eps, swap, reduction) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTriu(diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Triu(diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTriu_(diagonal int64)() { + + err := ts.Triu_(diagonal) + if err != nil { log.Fatal(err) } + + return +} + +func MustTriuIndices(row int64, col int64, offset int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := TriuIndices(row, col, offset, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTriuOut(out *Tensor, diagonal int64, del bool)(retVal *Tensor) { + + retVal, err := ts.TriuOut(out, diagonal, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrueDivide(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TrueDivide(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + 
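The generated `Must*` wrappers in this file all follow one pattern: call the error-returning variant and `log.Fatal` on failure; methods that take a trailing `del bool` drop the receiver tensor right after the call. A minimal usage sketch follows (assuming the import paths and the `MustSize`/`MustDrop` helpers from the rest of the gotch `tensor` package; only `MustZeros` and `MustUnsqueeze` signatures are taken from this file):

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Package-level constructor: MustZeros(size, optionsKind, optionsDevice).
	x := ts.MustZeros([]int64{2, 3}, gotch.Float, gotch.CPU)

	// del=true drops x as soon as the unsqueezed tensor exists, so the
	// intermediate C tensor is freed without waiting for Go's GC.
	y := x.MustUnsqueeze(0, true)

	fmt.Println(y.MustSize()) // assumed helper; expected shape [1 2 3]
	y.MustDrop()
}
```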
+func(ts *Tensor) MustTrueDivide1(other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.TrueDivide1(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrueDivide_(other *Tensor)() { + + err := ts.TrueDivide_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTrueDivide1_(other *Scalar)() { + + err := ts.TrueDivide1_(other) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTrueDivideOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TrueDivideOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrunc(del bool)(retVal *Tensor) { + + retVal, err := ts.Trunc(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTrunc_()() { + + err := ts.Trunc_() + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustTruncOut(out *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TruncOut(out, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustTypeAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.TypeAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUnflatten(dim int64, sizes []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Unflatten(dim, sizes, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUnfold(dimension int64, size int64, step int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Unfold(dimension, size, step, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64)(retVal *Tensor) { + + retVal, err := UnfoldBackward(gradIn, inputSizes, dim, size, step) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUniform_(from float64, to float64)() { + + err := ts.Uniform_(from, to) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustUnsqueeze(dim int64, del bool)(retVal *Tensor) { + + retVal, err := ts.Unsqueeze(dim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUnsqueeze_(dim int64)() { + + err := ts.Unsqueeze_(dim) + if err != nil { log.Fatal(err) } + + return +} + +func(ts *Tensor) MustUpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleBicubic2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBicubic2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBicubic2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBicubic2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := 
ts.UpsampleBicubic2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleBilinear2d(outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBilinear2dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleBilinear2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleBilinear2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleBilinear2dOut(out, outputSize, alignCorners, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleLinear1d(outputSize, alignCorners, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleLinear1dBackward(gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleLinear1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleLinear1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleLinear1dOut(out, outputSize, alignCorners, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest1d(outputSize []int64, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest1d(outputSize, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest1dBackward(gradOutput, outputSize, inputSize, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest1dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scales) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest1dOut(out *Tensor, outputSize 
[]int64, scales []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest1dOut(out, outputSize, scales, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest2d(outputSize, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest2dBackward(gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest2dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest2dOut(out, outputSize, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest3d(outputSize, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest3dBackward(gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleNearest3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleNearest3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleNearest3dOut(out, outputSize, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleTrilinear3d(outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleTrilinear3dBackward(gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustUpsampleTrilinear3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, 
scalesW []float64)(retVal *Tensor) { + + retVal, err := UpsampleTrilinear3dBackwardOut(gradInput, gradOutput, outputSize, inputSize, alignCorners, scalesD, scalesH, scalesW) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustUpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool)(retVal *Tensor) { + + retVal, err := ts.UpsampleTrilinear3dOut(out, outputSize, alignCorners, scalesD, scalesH, scalesW, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool)(retVal *Tensor) { + + retVal, err := ValueSelectingReductionBackward(grad, dim, indices, sizes, keepdim) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustValues(del bool)(retVal *Tensor) { + + retVal, err := ts.Values(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustVander(x *Tensor, n []int64, increasing bool)(retVal *Tensor) { + + retVal, err := Vander(x, n, increasing) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVar(unbiased bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Var(unbiased, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVar1(dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.Var1(dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, del bool)(retVal *Tensor) { + + retVal, err := ts.VarOut(out, dim, unbiased, keepdim, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVdot(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Vdot(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustVdotOut(out *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.VdotOut(out, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustView(size []int64, del bool)(retVal *Tensor) { + + retVal, err := ts.View(size, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustViewAs(other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.ViewAs(other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustViewAsComplex(del bool)(retVal *Tensor) { + + retVal, err := ts.ViewAsComplex(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustViewAsReal(del bool)(retVal *Tensor) { + + retVal, err := ts.ViewAsReal(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustVstack(tensors []Tensor)(retVal *Tensor) { + + retVal, err := Vstack(tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustVstackOut(out *Tensor, tensors []Tensor)(retVal *Tensor) { + + retVal, err := VstackOut(out, tensors) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustWhere1(condition *Tensor, other *Tensor, del bool)(retVal *Tensor) { + + retVal, err := ts.Where1(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustWhere2(condition *Tensor, selfScalar *Scalar, other *Tensor)(retVal *Tensor) { + + retVal, err := Where2(condition, selfScalar, other) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) 
MustWhere3(condition *Tensor, other *Scalar, del bool)(retVal *Tensor) { + + retVal, err := ts.Where3(condition, other, del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustWhere4(condition *Tensor, selfScalar *Scalar, other *Scalar)(retVal *Tensor) { + + retVal, err := Where4(condition, selfScalar, other) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustZero_()() { + + err := ts.Zero_() + if err != nil { log.Fatal(err) } + + return +} + +func MustZeros(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device)(retVal *Tensor) { + + retVal, err := Zeros(size, optionsKind, optionsDevice) + if err != nil { log.Fatal(err) } + + return retVal +} + +func(ts *Tensor) MustZerosLike(del bool)(retVal *Tensor) { + + retVal, err := ts.ZerosLike(del) + if err != nil { log.Fatal(err) } + + return retVal +} + +func MustZerosOut(out *Tensor, size []int64)(retVal *Tensor) { + + retVal, err := ZerosOut(out, size) + if err != nil { log.Fatal(err) } + + return retVal +} +// End of implementing Tensor ================================= diff --git a/tensor/optimizer.go b/tensor/optimizer.go index 6be1460..10d7da9 100644 --- a/tensor/optimizer.go +++ b/tensor/optimizer.go @@ -67,7 +67,8 @@ func (co *COptimizer) AddParameters(tensors []Tensor) error { ntensors := len(tensors) - lib.AtoAddParameters(co.coptimizer, ctensors, ntensors) + // NOTE: temporarily switch back, as param groups are not updated yet. + lib.AtoAddParametersOld(co.coptimizer, ctensors, ntensors) return TorchErr() } diff --git a/tensor/other.go b/tensor/other.go index 2c1db26..b552edf 100644 --- a/tensor/other.go +++ b/tensor/other.go @@ -19,7 +19,7 @@ func (ts *Tensor) CrossEntropyForLogits(targets *Tensor) (retVal *Tensor) { // AccuracyForLogits returns the average accuracy for some given logits assuming that // targets represent ground-truth. 
func (ts *Tensor) AccuracyForLogits(targets *Tensor) (retVal *Tensor) { - argmax := ts.MustArgmax(-1, false, true) + argmax := ts.MustArgmax([]int64{-1}, false, true) eq1 := argmax.MustEq1(targets, true) return eq1.MustTotype(gotch.Float, true).MustMean(gotch.Float, true) } diff --git a/tensor/tensor-generated.go b/tensor/tensor-generated.go index 8a4c832..5d87325 100644 --- a/tensor/tensor-generated.go +++ b/tensor/tensor-generated.go @@ -262,13 +262,13 @@ func (ts *Tensor) _AdaptiveAvgPool2dBackward(gradOutput *Tensor, del bool) (retV return retVal, err } -func (ts *Tensor) _Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) _AddBatchDim(batchDim int64, level int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.Atg_Addr(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + lib.Atg_AddBatchDim(ptr, ts.ctensor, batchDim, level) if err = TorchErr(); err != nil { return retVal, err } @@ -277,10 +277,25 @@ func (ts *Tensor) _Addr(vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, e return retVal, err } -func (ts *Tensor) _Addr_(vec1 *Tensor, vec2 *Tensor) (err error) { +func (ts *Tensor) _AddRelu(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.Atg_Addr_(ptr, ts.ctensor, vec1.ctensor, vec2.ctensor) + lib.Atg_AddRelu(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _AddRelu_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddRelu_(ptr, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { return err } @@ -288,13 +303,13 @@ func (ts *Tensor) _Addr_(vec1 *Tensor, vec2 *Tensor) (err error) { return err } -func (ts *Tensor) _AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) _AddReluOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.Atg_AddrOut(ptr, out.ctensor, ts.ctensor, vec1.ctensor, vec2.ctensor) + lib.Atg_AddReluOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { return retVal, err } @@ -303,6 +318,17 @@ func (ts *Tensor) _AddrOut(out *Tensor, vec1 *Tensor, vec2 *Tensor, del bool) (r return retVal, err } +func (ts *Tensor) _AddmvImpl_(self2 *Tensor, mat *Tensor, vec *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_AddmvImpl_(ptr, ts.ctensor, self2.ctensor, mat.ctensor, vec.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + func _AmpUpdateScale(growthTracker *Tensor, currentScale *Tensor, foundInf *Tensor, scaleGrowthFactor float64, scaleBackoffFactor float64, growthInterval int64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -326,6 +352,44 @@ func (ts *Tensor) _BaddbmmMkl_(batch1 *Tensor, batch2 *Tensor) (err error) { return err } +func (ts *Tensor) _Bmm(mat2 *Tensor, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.Atg_Bmm(ptr, ts.ctensor, mat2.ctensor, cdeterministic) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _BmmOut(out *Tensor, mat2 *Tensor, deterministic bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + lib.Atg_BmmOut(ptr, out.ctensor, ts.ctensor, mat2.ctensor, cdeterministic) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _CastByte(nonBlocking bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -575,6 +639,45 @@ func (ts *Tensor) _Coalesced_(coalesced bool) (err error) { return err } +func _ComputeLinearCombination(input *Tensor, coefficients *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ComputeLinearCombination(ptr, input.ctensor, coefficients.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _ComputeLinearCombinationOut(out *Tensor, input *Tensor, coefficients *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_ComputeLinearCombinationOut(ptr, out.ctensor, input.ctensor, coefficients.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _Conj(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Conj(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func _Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -603,6 +706,38 @@ func _Convolution(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, p return retVal, err } +func _Convolution1(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64, groups int64, benchmark bool, deterministic bool, cudnnEnabled bool, allowTf32 bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ctransposed := int32(0) + if transposed { + ctransposed = int32(1) + } + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + ccudnnEnabled := int32(0) + if cudnnEnabled { + ccudnnEnabled = int32(1) + } + callowTf32 := int32(0) + if allowTf32 { + callowTf32 = int32(1) + } + lib.Atg_Convolution1(ptr, input.ctensor, weight.ctensor, bias.ctensor, stride, len(stride), padding, len(padding), dilation, len(dilation), ctransposed, outputPadding, len(outputPadding), groups, cbenchmark, cdeterministic, ccudnnEnabled, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func _ConvolutionNogroup(input *Tensor, weight *Tensor, bias *Tensor, stride []int64, padding []int64, dilation []int64, transposed bool, outputPadding []int64) (retVal *Tensor, err error) { ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -866,6 +1001,48 @@ func _EmptyPerChannelAffineQuantized(size []int64, scales *Tensor, zeroPoints *T return retVal, err } +func _EuclideanDist(x1 *Tensor, x2 *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_EuclideanDist(ptr, x1.ctensor, x2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _FakeQuantizeLearnablePerChannelAffine(scale *Tensor, zeroPoint *Tensor, axis int64, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_FakeQuantizeLearnablePerChannelAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, axis, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _FakeQuantizeLearnablePerTensorAffine(scale *Tensor, zeroPoint *Tensor, quantMin int64, quantMax int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_FakeQuantizeLearnablePerTensorAffine(ptr, ts.ctensor, scale.ctensor, zeroPoint.ctensor, quantMin, quantMax) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _FftWithSize(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalized bool, onesided bool, outputSizes []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -901,6 +1078,37 @@ func (ts *Tensor) _FftWithSize(signalNdim int64, complexInput bool, complexOutpu return retVal, err } +func (ts *Tensor) _FftWithSize1(signalNdim int64, complexInput bool, complexOutput bool, inverse bool, checkedSignalSizes []int64, normalization int64, onesided bool, outputSizes []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ccomplexInput := int32(0) + if complexInput { + ccomplexInput = int32(1) + } + ccomplexOutput := int32(0) + if complexOutput { + ccomplexOutput = int32(1) + } + cinverse := int32(0) + if inverse { + cinverse = int32(1) + } + conesided := int32(0) + if onesided { + conesided = int32(1) + } + lib.Atg_FftWithSize1(ptr, ts.ctensor, signalNdim, ccomplexInput, ccomplexOutput, cinverse, checkedSignalSizes, len(checkedSignalSizes), normalization, conesided, outputSizes, len(outputSizes)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -916,6 +1124,22 @@ func (ts *Tensor) _GatherSparseBackward(dim int64, index *Tensor, grad *Tensor, return retVal, err } +func _GridSampler2dCpuFallback(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + calignCorners := int32(0) + if alignCorners { + calignCorners = int32(1) + } + lib.Atg_GridSampler2dCpuFallback(ptr, input.ctensor, grid.ctensor, interpolationMode, paddingMode, calignCorners) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _IndexCopy_(dim int64, index *Tensor, source *Tensor) (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -1014,6 +1238,36 @@ func (ts *Tensor) _LogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, di return retVal, err } +func (ts *Tensor) _Logcumsumexp(dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_Logcumsumexp(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _LogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_LogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _LuSolveHelper(lUData *Tensor, lUPivots *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -1194,6 +1448,21 @@ func (ts *Tensor) _PdistBackward(grad *Tensor, p float64, pdist *Tensor, del boo return retVal, err } +func (ts *Tensor) _RemoveBatchDim(level int64, batchSize int64, outDim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_RemoveBatchDim(ptr, ts.ctensor, level, batchSize, outDim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _ReshapeFromTensor(shape *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -1239,6 +1508,18 @@ func (ts *Tensor) _SampleDirichlet(del bool) (retVal *Tensor, err error) { return retVal, err } +func _SaturateWeightToFp16(weight *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SaturateWeightToFp16(ptr, weight.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _ShapeAsTensor(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -1372,6 +1653,55 @@ func _SparseCooTensorWithDimsAndTensors(sparseDim int64, denseDim int64, size [] return retVal, err } +func (ts *Tensor) _SparseLogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseLogSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseLogSoftmax1(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { + chalfToFloat = int32(1) + } + lib.Atg_SparseLogSoftmax1(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseLogSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.Atg_SparseLogSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func _SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -1384,6 +1714,55 @@ func _SparseMm(sparse *Tensor, dense *Tensor) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) _SparseSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSoftmax(ptr, ts.ctensor, dim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseSoftmax1(dim int64, halfToFloat bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + chalfToFloat := int32(0) + if halfToFloat { + chalfToFloat = int32(1) + } + lib.Atg_SparseSoftmax1(ptr, ts.ctensor, dim, chalfToFloat) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _SparseSoftmaxBackwardData(gradOutput *Tensor, output *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_SparseSoftmaxBackwardData(ptr, gradOutput.ctensor, output.ctensor, dim, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) _SparseSum(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -1508,6 +1887,45 @@ func (ts *Tensor) _Std(unbiased bool, del bool) (retVal *Tensor, err error) { return retVal, err } +func _TestOptionalFilledIntlist(values *Tensor, addends []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestOptionalFilledIntlist(ptr, values.ctensor, addends, len(addends)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func _TestOptionalIntlist(values *Tensor, addends []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestOptionalIntlist(ptr, values.ctensor, addends, len(addends)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) _TestSerializationSubcmul(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.Atg_TestSerializationSubcmul(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func _Trilinear(i1 *Tensor, i2 *Tensor, i3 *Tensor, expand1 []int64, expand2 []int64, expand3 []int64, sumdim []int64, unrollDim int64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -1622,6 +2040,47 @@ func (ts *Tensor) AbsOut(out *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Absolute(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgAbsolute(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Absolute_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsolute_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AbsoluteOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAbsoluteOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Acos(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -1663,6 +2122,47 @@ func (ts *Tensor) AcosOut(out *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Acosh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcosh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Acosh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcosh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AcoshOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAcoshOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) AdaptiveAvgPool1d(outputSize []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -2287,6 +2787,82 @@ func (ts *Tensor) AlphaDropout_(p float64, train bool) (err error) { return err } +func (ts *Tensor) Amax(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgAmax(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AmaxOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgAmaxOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Amin(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgAmin(ptr, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) AminOut(out *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + 
if keepdim { + ckeepdim = int32(1) + } + lib.AtgAminOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Angle(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -2430,17 +3006,13 @@ func ArangeOut1(out *Tensor, start *Scalar, end *Scalar) (retVal *Tensor, err er return retVal, err } -func (ts *Tensor) Argmax(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) Arccos(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) - } - lib.AtgArgmax(ptr, ts.ctensor, dim, ckeepdim) + lib.AtgArccos(ptr, ts.ctensor) if err = TorchErr(); err != nil { return retVal, err } @@ -2449,17 +3021,279 @@ func (ts *Tensor) Argmax(dim int64, keepdim bool, del bool) (retVal *Tensor, err return retVal, err } -func (ts *Tensor) Argmin(dim int64, keepdim bool, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) Arccos_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccos_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ArccosOut(out *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + lib.AtgArccosOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arccosh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccosh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arccosh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccosh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ArccoshOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArccoshOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arcsin(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsin(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arcsin_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsin_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ArcsinOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsinOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arcsinh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + 
lib.AtgArcsinh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arcsinh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsinh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ArcsinhOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArcsinhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arctan(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctan(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arctan_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctan_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ArctanOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arctanh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Arctanh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ArctanhOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgArctanhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Argmax(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } ckeepdim := int32(0) if keepdim { ckeepdim = int32(1) } - lib.AtgArgmin(ptr, ts.ctensor, dim, ckeepdim) + lib.AtgArgmax(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Argmin(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgArgmin(ptr, ts.ctensor, cdimVal, cdimNull, ckeepdim) if err = TorchErr(); err != nil { return retVal, err } @@ -2487,13 +3321,19 @@ func (ts *Tensor) Argsort(dim 
int64, descending bool, del bool) (retVal *Tensor, return retVal, err } -func (ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) + var cstorageOffsetVal int64 = 0 + var cstorageOffsetNull int = 1 + if len(storageOffset) > 0 { + cstorageOffsetVal = storageOffset[0] + cstorageOffsetNull = 0 + } + lib.AtgAsStrided(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { return retVal, err } @@ -2502,10 +3342,16 @@ func (ts *Tensor) AsStrided(size []int64, stride []int64, storageOffset int64, d return retVal, err } -func (ts *Tensor) AsStrided_(size []int64, stride []int64, storageOffset int64) (err error) { +func (ts *Tensor) AsStrided_(size []int64, stride []int64, storageOffset []int64) (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), storageOffset) + var cstorageOffsetVal int64 = 0 + var cstorageOffsetNull int = 1 + if len(storageOffset) > 0 { + cstorageOffsetVal = storageOffset[0] + cstorageOffsetNull = 0 + } + lib.AtgAsStrided_(ptr, ts.ctensor, size, len(size), stride, len(stride), cstorageOffsetVal, cstorageOffsetNull) if err = TorchErr(); err != nil { return err } @@ -2554,6 +3400,47 @@ func (ts *Tensor) AsinOut(out *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Asinh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Asinh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AsinhOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAsinhOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Atan(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -2636,6 +3523,92 @@ func (ts *Tensor) AtanOut(out *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Atanh(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanh(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atanh_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanh_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) AtanhOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtanhOut(ptr, out.ctensor, ts.ctensor) + if err = 
TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atleast1d(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtleast1d(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atleast2d(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtleast2d(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Atleast3d(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgAtleast3d(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -2659,7 +3632,7 @@ func (ts *Tensor) AvgPool1d(kernelSize []int64, stride []int64, padding []int64, return retVal, err } -func (ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -2673,7 +3646,13 @@ func (ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } @@ -2682,7 +3661,7 @@ func (ts *Tensor) AvgPool2d(kernelSize []int64, stride []int64, padding []int64, return retVal, err } -func (ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -2696,7 +3675,13 @@ func (ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stri if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = 
divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } @@ -2705,7 +3690,7 @@ func (ts *Tensor) AvgPool2dBackward(gradOutput *Tensor, kernelSize []int64, stri return retVal, err } -func (ts *Tensor) AvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -2719,7 +3704,13 @@ func (ts *Tensor) AvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, ke if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } @@ -2728,7 +3719,7 @@ func (ts *Tensor) AvgPool2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, ke return retVal, err } -func (ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -2742,7 +3733,13 @@ func (ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool2dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } @@ -2751,7 +3748,7 @@ func (ts *Tensor) AvgPool2dOut(out *Tensor, kernelSize []int64, stride []int64, return retVal, err } -func (ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { 
if del { defer ts.MustDrop() } @@ -2765,7 +3762,13 @@ func (ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } @@ -2774,7 +3777,7 @@ func (ts *Tensor) AvgPool3d(kernelSize []int64, stride []int64, padding []int64, return retVal, err } -func (ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -2788,7 +3791,13 @@ func (ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stri if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3dBackward(ptr, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } @@ -2797,7 +3806,7 @@ func (ts *Tensor) AvgPool3dBackward(gradOutput *Tensor, kernelSize []int64, stri return retVal, err } -func (ts *Tensor) AvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -2811,7 +3820,13 @@ func (ts *Tensor) AvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, ke if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } 
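Note on the regenerated signatures above: arguments that became optional in libtorch v1.7.0 are now surfaced in Go as slices ([]int64 / []float64) and lowered to a value/null-flag pair at the C boundary (see the cdimVal/cdimNull and cdivisorOverrideVal/cdivisorOverrideNull locals). The sketch below is illustrative only and is not part of this changeset; it assumes the generated code lives in the tensor package (import path github.com/sugarme/gotch/tensor) and that gotch.CPU is the exported CPU device value. A nil or empty slice means "argument not set"; a one-element slice carries the value.

package main

import (
	"log"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/tensor"
)

// Illustrative sketch of calling the updated signatures; not part of this diff.
func main() {
	// MustZeros is one of the Must* wrappers generated above; gotch.CPU is assumed here.
	logits := tensor.MustZeros([]int64{4, 10}, gotch.Float, gotch.CPU)
	x := tensor.MustZeros([]int64{1, 1, 8, 8}, gotch.Float, gotch.CPU)

	// Argmax: dim changed from int64 to []int64 (nil would mean "dim not set").
	argmax, err := logits.Argmax([]int64{-1}, false, false)
	if err != nil {
		log.Fatal(err)
	}

	// AvgPool2d: divisorOverride changed from int64 to []int64 (nil keeps the default divisor).
	pooled, err := x.AvgPool2d([]int64{2, 2}, []int64{2, 2}, []int64{0, 0}, false, true, nil, false)
	if err != nil {
		log.Fatal(err)
	}

	// del was false above, so the inputs stay alive; free all tensors explicitly.
	argmax.MustDrop()
	pooled.MustDrop()
	logits.MustDrop()
	x.MustDrop()
}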
@@ -2820,7 +3835,7 @@ func (ts *Tensor) AvgPool3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, ke return retVal, err } -func (ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, padding []int64, ceilMode bool, countIncludePad bool, divisorOverride []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -2834,7 +3849,13 @@ func (ts *Tensor) AvgPool3dOut(out *Tensor, kernelSize []int64, stride []int64, if countIncludePad { ccountIncludePad = int32(1) } - lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, divisorOverride) + var cdivisorOverrideVal int64 = 0 + var cdivisorOverrideNull int = 1 + if len(divisorOverride) > 0 { + cdivisorOverrideVal = divisorOverride[0] + cdivisorOverrideNull = 0 + } + lib.AtgAvgPool3dOut(ptr, out.ctensor, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), cceilMode, ccountIncludePad, cdivisorOverrideVal, cdivisorOverrideNull) if err = TorchErr(); err != nil { return retVal, err } @@ -3152,6 +4173,18 @@ func (ts *Tensor) Bincount(weights *Tensor, minlength int64, del bool) (retVal * return retVal, err } +func Binomial(count *Tensor, prob *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgBinomial(ptr, count.ctensor, prob.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) BitwiseAnd(other *Scalar, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -3467,6 +4500,22 @@ func BlackmanWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, return retVal, err } +func BlockDiag(tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgBlockDiag(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Bmm(mat2 *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -3497,6 +4546,72 @@ func (ts *Tensor) BmmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, e return retVal, err } +func (ts *Tensor) Bucketize(boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { + coutInt32 = int32(1) + } + cright := int32(0) + if right { + cright = int32(1) + } + lib.AtgBucketize(ptr, ts.ctensor, boundaries.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Bucketize1(selfScalar *Scalar, boundaries *Tensor, outInt32 bool, right bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { + coutInt32 = int32(1) + } + cright := int32(0) + if right { + cright = int32(1) + } + lib.AtgBucketize1(ptr, selfScalar.cscalar, boundaries.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return 
retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) BucketizeOut(out *Tensor, boundaries *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { + coutInt32 = int32(1) + } + cright := int32(0) + if right { + cright = int32(1) + } + lib.AtgBucketizeOut(ptr, out.ctensor, ts.ctensor, boundaries.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func CartesianProd(tensors []Tensor) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -3556,10 +4671,16 @@ func (ts *Tensor) Cauchy_(median float64, sigma float64) (err error) { return err } -func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode int64) (retVal *Tensor, err error) { +func Cdist(x1 *Tensor, x2 *Tensor, p float64, computeMode []int64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, computeMode) + var ccomputeModeVal int64 = 0 + var ccomputeModeNull int = 1 + if len(computeMode) > 0 { + ccomputeModeVal = computeMode[0] + ccomputeModeNull = 0 + } + lib.AtgCdist(ptr, x1.ctensor, x2.ctensor, p, ccomputeModeVal, ccomputeModeNull) if err = TorchErr(); err != nil { return retVal, err } @@ -3651,6 +4772,21 @@ func ChainMatmul(matrices []Tensor) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) ChannelShuffle(groups int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgChannelShuffle(ptr, ts.ctensor, groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Cholesky(upper bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -3888,6 +5024,47 @@ func (ts *Tensor) ClampOut(out *Tensor, min *Scalar, max *Scalar, del bool) (ret return retVal, err } +func (ts *Tensor) Clip(min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClip(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Clip_(min *Scalar, max *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClip_(ptr, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) ClipOut(out *Tensor, min *Scalar, max *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgClipOut(ptr, out.ctensor, ts.ctensor, min.cscalar, max.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Coalesce(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -3976,6 +5153,30 @@ func (ts *Tensor) Combinations(r int64, withReplacement bool, del bool) (retVal return retVal, err } +func Complex(real *Tensor, imag *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgComplex(ptr, real.ctensor, 
imag.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ComplexOut(out *Tensor, real *Tensor, imag *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgComplexOut(ptr, out.ctensor, real.ctensor, imag.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Conj(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -4276,13 +5477,13 @@ func CosineSimilarity(x1 *Tensor, x2 *Tensor, dim int64, eps float64) (retVal *T return retVal, err } -func (ts *Tensor) Cross(other *Tensor, dim int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) CountNonzero(dim []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgCross(ptr, ts.ctensor, other.ctensor, dim) + lib.AtgCountNonzero(ptr, ts.ctensor, dim, len(dim)) if err = TorchErr(); err != nil { return retVal, err } @@ -4291,13 +5492,61 @@ func (ts *Tensor) Cross(other *Tensor, dim int64, del bool) (retVal *Tensor, err return retVal, err } -func (ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) CountNonzero1(dim []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, dim) + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgCountNonzero1(ptr, ts.ctensor, cdimVal, cdimNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Cross(other *Tensor, dim []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgCross(ptr, ts.ctensor, other.ctensor, cdimVal, cdimNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CrossOut(out *Tensor, other *Tensor, dim []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgCrossOut(ptr, out.ctensor, ts.ctensor, other.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { return retVal, err } @@ -4408,27 +5657,7 @@ func (ts *Tensor) CudnnConvolution1(weight *Tensor, bias *Tensor, padding []int6 return retVal, err } -func CudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = int32(1) - } - lib.AtgCudnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if 
err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) CudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) CudnnConvolution2(weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -4442,7 +5671,62 @@ func (ts *Tensor) CudnnConvolutionBackwardWeight(weightSize []int64, gradOutput if deterministic { cdeterministic = int32(1) } - lib.AtgCudnnConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + callowTf32 := int32(0) + if allowTf32 { + callowTf32 = int32(1) + } + lib.AtgCudnnConvolution2(ptr, ts.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnConvolutionBackwardInput(selfSize []int64, gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + callowTf32 := int32(0) + if allowTf32 { + callowTf32 = int32(1) + } + lib.AtgCudnnConvolutionBackwardInput(ptr, selfSize, len(selfSize), gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolutionBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + callowTf32 := int32(0) + if allowTf32 { + callowTf32 = int32(1) + } + lib.AtgCudnnConvolutionBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) if err = TorchErr(); err != nil { return retVal, err } @@ -4497,27 +5781,7 @@ func (ts *Tensor) CudnnConvolutionTranspose1(weight *Tensor, bias *Tensor, paddi return retVal, err } -func CudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool) (retVal *Tensor, err error) { - ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - - cbenchmark := int32(0) - if benchmark { - cbenchmark = int32(1) - } - cdeterministic := int32(0) - if deterministic { - cdeterministic = 
int32(1) - } - lib.AtgCudnnConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) - if err = TorchErr(); err != nil { - return retVal, err - } - retVal = &Tensor{ctensor: *ptr} - - return retVal, err -} - -func (ts *Tensor) CudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) CudnnConvolutionTranspose2(weight *Tensor, padding []int64, outputPadding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -4531,7 +5795,62 @@ func (ts *Tensor) CudnnConvolutionTransposeBackwardWeight(weightSize []int64, gr if deterministic { cdeterministic = int32(1) } - lib.AtgCudnnConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic) + callowTf32 := int32(0) + if allowTf32 { + callowTf32 = int32(1) + } + lib.AtgCudnnConvolutionTranspose2(ptr, ts.ctensor, weight.ctensor, padding, len(padding), outputPadding, len(outputPadding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func CudnnConvolutionTransposeBackwardInput(gradOutput *Tensor, weight *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + callowTf32 := int32(0) + if allowTf32 { + callowTf32 = int32(1) + } + lib.AtgCudnnConvolutionTransposeBackwardInput(ptr, gradOutput.ctensor, weight.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) CudnnConvolutionTransposeBackwardWeight(weightSize []int64, gradOutput *Tensor, padding []int64, stride []int64, dilation []int64, groups int64, benchmark bool, deterministic bool, allowTf32 bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cbenchmark := int32(0) + if benchmark { + cbenchmark = int32(1) + } + cdeterministic := int32(0) + if deterministic { + cdeterministic = int32(1) + } + callowTf32 := int32(0) + if allowTf32 { + callowTf32 = int32(1) + } + lib.AtgCudnnConvolutionTransposeBackwardWeight(ptr, weightSize, len(weightSize), gradOutput.ctensor, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups, cbenchmark, cdeterministic, callowTf32) if err = TorchErr(); err != nil { return retVal, err } @@ -4555,6 +5874,18 @@ func (ts *Tensor) CudnnGridSampler(grid *Tensor, del bool) (retVal *Tensor, err return retVal, err } +func CummaxminBackward(grad *Tensor, input *Tensor, indices *Tensor, dim int64) (retVal *Tensor, err error) { 
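+ // Free functions such as CummaxminBackward follow the same generated pattern as
+ // the method bindings: allocate a placeholder ctensor, call the lib.Atg* wrapper,
+ // check the global Torch error state, then wrap the returned pointer in a Tensor.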
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCummaxminBackward(ptr, grad.ctensor, input.ctensor, indices.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -4570,6 +5901,18 @@ func (ts *Tensor) Cumprod(dim int64, dtype gotch.DType, del bool) (retVal *Tenso return retVal, err } +func CumprodBackward(grad *Tensor, input *Tensor, dim int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgCumprodBackward(ptr, grad.ctensor, input.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) CumprodOut(out *Tensor, dim int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -4630,6 +5973,47 @@ func (ts *Tensor) Data(del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Deg2rad(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDeg2rad(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Deg2rad_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDeg2rad_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Deg2radOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDeg2radOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Dequantize(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -4701,6 +6085,18 @@ func (ts *Tensor) Diag(diagonal int64, del bool) (retVal *Tensor, err error) { return retVal, err } +func DiagBackward(grad *Tensor, inputSizes []int64, diagonal int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagBackward(ptr, grad.ctensor, inputSizes, len(inputSizes), diagonal) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) DiagEmbed(offset int64, dim1 int64, dim2 int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -4761,6 +6157,18 @@ func (ts *Tensor) Diagonal(offset int64, dim1 int64, dim2 int64, del bool) (retV return retVal, err } +func DiagonalBackward(grad *Tensor, inputSizes []int64, offset int64, dim1 int64, dim2 int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDiagonalBackward(ptr, grad.ctensor, inputSizes, len(inputSizes), offset, dim1, dim2) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Digamma(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -4884,6 +6292,73 @@ func (ts *Tensor) DivOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, return retVal, err } +func (ts *Tensor) Divide(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivide(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Divide1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivide1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Divide_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivide_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Divide1_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivide1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) DivideOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgDivideOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Dot(tensor *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -4945,6 +6420,38 @@ func (ts *Tensor) Dropout_(p float64, train bool) (err error) { return err } +func Dstack(tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgDstack(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func DstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgDstackOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func Einsum(equation string, tensors []Tensor) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -5136,6 +6643,18 @@ func (ts *Tensor) EmptyLike(del bool) (retVal *Tensor, err error) { return retVal, err } +func EmptyMeta(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyMeta(ptr, size, len(size), optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func EmptyOut(out *Tensor, size []int64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -5148,6 +6667,18 @@ func EmptyOut(out *Tensor, size []int64) (retVal *Tensor, err error) { return retVal, err } +func EmptyQuantized(size []int64, qtensor *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgEmptyQuantized(ptr, size, len(size), qtensor.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = 
&Tensor{ctensor: *ptr} + + return retVal, err +} + func EmptyStrided(size []int64, stride []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -5380,6 +6911,47 @@ func (ts *Tensor) Exp(del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Exp2(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExp2(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Exp2_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExp2_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Exp2Out(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgExp2Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Exp_() (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -5765,6 +7337,192 @@ func (ts *Tensor) Fft(signalNdim int64, normalized bool, del bool) (retVal *Tens return retVal, err } +func (ts *Tensor) FftFft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftFft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftFftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftFftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftHfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftHfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftIfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftIfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: 
*ptr} + + return retVal, err +} + +func (ts *Tensor) FftIhfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIhfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftIrfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftIrfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftIrfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftIrfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftRfft(n []int64, dim int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + lib.AtgFftRfft(ptr, ts.ctensor, cnVal, cnNull, dim, norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) FftRfftn(s []int64, dim []int64, norm string, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFftRfftn(ptr, ts.ctensor, s, len(s), dim, len(dim), norm) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Fill_(value *Scalar) (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -5802,6 +7560,47 @@ func (ts *Tensor) FillDiagonal_(fillValue *Scalar, wrap bool) (err error) { return err } +func (ts *Tensor) Fix(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFix(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Fix_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFix_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) FixOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFixOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Flatten(startDim int64, endDim int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -5832,6 +7631,36 @@ func (ts *Tensor) Flip(dims []int64, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) 
Fliplr(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFliplr(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Flipud(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgFlipud(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Floor(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -6176,14 +8005,20 @@ func (ts *Tensor) FrobeniusNormOut(out *Tensor, dim []int64, keepdim bool, del b return retVal, err } -func FromFile(filename string, shared bool, size int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { +func FromFile(filename string, shared bool, size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) cshared := int32(0) if shared { cshared = int32(1) } - lib.AtgFromFile(ptr, filename, cshared, size, optionsKind.CInt(), optionsDevice.CInt()) + var csizeVal int64 = 0 + var csizeNull int = 1 + if len(size) > 0 { + csizeVal = size[0] + csizeNull = 0 + } + lib.AtgFromFile(ptr, filename, cshared, csizeVal, csizeNull, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { return retVal, err } @@ -6250,6 +8085,25 @@ func (ts *Tensor) Gather(dim int64, index *Tensor, sparseGrad bool, del bool) (r return retVal, err } +func (ts *Tensor) GatherBackward(grad *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + csparseGrad := int32(0) + if sparseGrad { + csparseGrad = int32(1) + } + lib.AtgGatherBackward(ptr, grad.ctensor, ts.ctensor, dim, index.ctensor, csparseGrad) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -6269,6 +8123,47 @@ func (ts *Tensor) GatherOut(out *Tensor, dim int64, index *Tensor, sparseGrad bo return retVal, err } +func (ts *Tensor) Gcd(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGcd(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Gcd_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGcd_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) GcdOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGcdOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Ge(other *Scalar, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -6497,6 +8392,170 @@ func 
(ts *Tensor) Grad(del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Greater(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreater(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Greater1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreater1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Greater_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreater_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Greater1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreater1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) GreaterEqual(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqual(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GreaterEqual1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqual1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GreaterEqual_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqual_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) GreaterEqual1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqual1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) GreaterEqualOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqualOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GreaterEqualOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterEqualOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GreaterOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal 
= &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) GreaterOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgGreaterOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func GridSampler(input *Tensor, grid *Tensor, interpolationMode int64, paddingMode int64, alignCorners bool) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -6829,6 +8888,62 @@ func (ts *Tensor) HardsigmoidOut(out *Tensor, del bool) (retVal *Tensor, err err return retVal, err } +func (ts *Tensor) Hardswish(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswish(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hardswish_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswish_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) HardswishBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswishBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) HardswishOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHardswishOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Hardtanh(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -6900,6 +9015,47 @@ func (ts *Tensor) HardtanhOut(out *Tensor, del bool) (retVal *Tensor, err error) return retVal, err } +func (ts *Tensor) Heaviside(values *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHeaviside(ptr, ts.ctensor, values.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Heaviside_(values *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHeaviside_(ptr, ts.ctensor, values.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) HeavisideOut(out *Tensor, values *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHeavisideOut(ptr, out.ctensor, ts.ctensor, values.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) HingeEmbeddingLoss(target *Tensor, margin float64, reduction int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -6969,6 +9125,120 @@ func HspmmOut(out *Tensor, mat1 *Tensor, mat2 *Tensor) (retVal *Tensor, err erro return retVal, err } +func Hstack(tensors []Tensor) (retVal *Tensor, err error) { + ptr 
:= (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgHstack(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func HstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgHstackOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hypot(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHypot(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Hypot_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHypot_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) HypotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgHypotOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) I0(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgI0(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) I0_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgI0_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) I0Out(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgI0Out(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Ifft(signalNdim int64, normalized bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -7237,6 +9507,18 @@ func (ts *Tensor) IndexSelect(dim int64, index *Tensor, del bool) (retVal *Tenso return retVal, err } +func IndexSelectBackward(grad *Tensor, selfSizes []int64, dim int64, index *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIndexSelectBackward(ptr, grad.ctensor, selfSizes, len(selfSizes), dim, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) IndexSelectOut(out *Tensor, dim int64, index *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -7267,6 +9549,21 @@ func (ts *Tensor) Indices(del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) InfinitelyDifferentiableGeluBackward(grad *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer 
ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgInfinitelyDifferentiableGeluBackward(ptr, grad.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func InstanceNorm(input *Tensor, weight *Tensor, bias *Tensor, runningMean *Tensor, runningVar *Tensor, useInputStats bool, momentum float64, eps float64, cudnnEnabled bool) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -7419,13 +9716,13 @@ func (ts *Tensor) Isnan(del bool) (retVal *Tensor, err error) { return retVal, err } -func (ts *Tensor) KlDiv(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) Isneginf(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction) + lib.AtgIsneginf(ptr, ts.ctensor) if err = TorchErr(); err != nil { return retVal, err } @@ -7434,13 +9731,189 @@ func (ts *Tensor) KlDiv(target *Tensor, reduction int64, del bool) (retVal *Tens return retVal, err } -func (ts *Tensor) KlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) IsneginfOut(out *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + lib.AtgIsneginfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Isposinf(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsposinf(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) IsposinfOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsposinfOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Isreal(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgIsreal(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Istft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, center bool, normalized bool, onesided bool, length []int64, returnComplex bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var chopLengthVal int64 = 0 + var chopLengthNull int = 1 + if len(hopLength) > 0 { + chopLengthVal = hopLength[0] + chopLengthNull = 0 + } + var cwinLengthVal int64 = 0 + var cwinLengthNull int = 1 + if len(winLength) > 0 { + cwinLengthVal = winLength[0] + cwinLengthNull = 0 + } + ccenter := int32(0) + if center { + ccenter = int32(1) + } + cnormalized := int32(0) + if normalized { + cnormalized = int32(1) + } + conesided := int32(0) + if onesided { + conesided = int32(1) + } + var clengthVal int64 = 0 + var clengthNull int = 1 + if len(length) > 
0 { + clengthVal = length[0] + clengthNull = 0 + } + creturnComplex := int32(0) + if returnComplex { + creturnComplex = int32(1) + } + lib.AtgIstft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, ccenter, cnormalized, conesided, clengthVal, clengthNull, creturnComplex) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func KaiserWindow(windowLength int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgKaiserWindow(ptr, windowLength, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func KaiserWindow1(windowLength int64, periodic bool, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgKaiserWindow1(ptr, windowLength, cperiodic, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func KaiserWindow2(windowLength int64, periodic bool, beta float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cperiodic := int32(0) + if periodic { + cperiodic = int32(1) + } + lib.AtgKaiserWindow2(ptr, windowLength, cperiodic, beta, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) KlDiv(target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + clogTarget := int32(0) + if logTarget { + clogTarget = int32(1) + } + lib.AtgKlDiv(ptr, ts.ctensor, target.ctensor, reduction, clogTarget) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) KlDivBackward(gradOutput *Tensor, target *Tensor, reduction int64, logTarget bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + clogTarget := int32(0) + if logTarget { + clogTarget = int32(1) + } + lib.AtgKlDivBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, clogTarget) if err = TorchErr(); err != nil { return retVal, err } @@ -7525,6 +9998,47 @@ func LayerNorm(input *Tensor, normalizedShape []int64, weight *Tensor, bias *Ten return retVal, err } +func (ts *Tensor) Lcm(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLcm(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Lcm_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLcm_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LcmOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } 
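+ // The trailing `del` flag is the generated-API convention for chained calls:
+ // when true, the receiver is dropped after this call so intermediate C tensors
+ // created by Must* chaining are not leaked.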
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLcmOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Le(other *Scalar, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -7749,6 +10263,170 @@ func (ts *Tensor) LerpOut1(out *Tensor, end *Tensor, weight *Tensor, del bool) ( return retVal, err } +func (ts *Tensor) Less(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLess(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Less1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLess1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Less_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLess_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Less1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLess1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LessEqual(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqual(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LessEqual1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqual1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LessEqual_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqual_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LessEqual1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqual1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LessEqualOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqualOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LessEqualOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessEqualOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, 
err +} + +func (ts *Tensor) LessOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LessOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLessOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Lgamma(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -7790,6 +10468,97 @@ func (ts *Tensor) LgammaOut(out *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) LinalgDet(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLinalgDet(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LinalgNorm(ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgLinalgNorm(ptr, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LinalgNorm1(ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgLinalgNorm1(ptr, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LinalgNormOut(out *Tensor, ord *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgLinalgNormOut(ptr, out.ctensor, ts.ctensor, ord.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LinalgNormOut1(out *Tensor, ord string, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgLinalgNormOut1(ptr, out.ctensor, ts.ctensor, ord, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func Linear(input *Tensor, weight *Tensor, bias *Tensor) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -7802,10 +10571,16 @@ func Linear(input *Tensor, weight *Tensor, bias *Tensor) 
(retVal *Tensor, err er return retVal, err } -func Linspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { +func Linspace(start *Scalar, end *Scalar, steps []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgLinspace(ptr, start.cscalar, end.cscalar, steps, optionsKind.CInt(), optionsDevice.CInt()) + var cstepsVal int64 = 0 + var cstepsNull int = 1 + if len(steps) > 0 { + cstepsVal = steps[0] + cstepsNull = 0 + } + lib.AtgLinspace(ptr, start.cscalar, end.cscalar, cstepsVal, cstepsNull, optionsKind.CInt(), optionsDevice.CInt()) if err = TorchErr(); err != nil { return retVal, err } @@ -7814,10 +10589,16 @@ func Linspace(start *Scalar, end *Scalar, steps int64, optionsKind gotch.DType, return retVal, err } -func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64) (retVal *Tensor, err error) { +func LinspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps) + var cstepsVal int64 = 0 + var cstepsNull int = 1 + if len(steps) > 0 { + cstepsVal = steps[0] + cstepsNull = 0 + } + lib.AtgLinspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, cstepsVal, cstepsNull) if err = TorchErr(); err != nil { return retVal, err } @@ -8076,6 +10857,96 @@ func (ts *Tensor) LogSoftmax(dim int64, dtype gotch.DType, del bool) (retVal *Te return retVal, err } +func (ts *Tensor) Logaddexp(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogaddexp(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Logaddexp2(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogaddexp2(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Logaddexp2Out(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogaddexp2Out(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogaddexpOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogaddexpOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Logcumsumexp(dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogcumsumexp(ptr, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogcumsumexpOut(out *Tensor, dim int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } 
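+ // *Out variants write the result into the caller-supplied `out` tensor; its
+ // ctensor is passed to the C wrapper ahead of the receiver, mirroring the
+ // Libtorch out= overloads.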
+ ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgLogcumsumexpOut(ptr, out.ctensor, ts.ctensor, dim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Logdet(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -8255,10 +11126,19 @@ func (ts *Tensor) LogicalXorOut(out *Tensor, other *Tensor, del bool) (retVal *T return retVal, err } -func Logspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { +func (ts *Tensor) Logit(eps []float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgLogspace(ptr, start.cscalar, end.cscalar, steps, base, optionsKind.CInt(), optionsDevice.CInt()) + var cepsVal float64 = 0.0 + var cepsNull int = 1 + if len(eps) > 0 { + cepsVal = eps[0] + cepsNull = 0 + } + lib.AtgLogit(ptr, ts.ctensor, cepsVal, cepsNull) if err = TorchErr(); err != nil { return retVal, err } @@ -8267,10 +11147,114 @@ func Logspace(start *Scalar, end *Scalar, steps int64, base float64, optionsKind return retVal, err } -func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps int64, base float64) (retVal *Tensor, err error) { +func (ts *Tensor) Logit_(eps []float64) (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, steps, base) + var cepsVal float64 = 0.0 + var cepsNull int = 1 + if len(eps) > 0 { + cepsVal = eps[0] + cepsNull = 0 + } + lib.AtgLogit_(ptr, ts.ctensor, cepsVal, cepsNull) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) LogitBackward(gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cepsVal float64 = 0.0 + var cepsNull int = 1 + if len(eps) > 0 { + cepsVal = eps[0] + cepsNull = 0 + } + lib.AtgLogitBackward(ptr, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogitBackwardOut(gradInput *Tensor, gradOutput *Tensor, eps []float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cepsVal float64 = 0.0 + var cepsNull int = 1 + if len(eps) > 0 { + cepsVal = eps[0] + cepsNull = 0 + } + lib.AtgLogitBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, cepsVal, cepsNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) LogitOut(out *Tensor, eps []float64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cepsVal float64 = 0.0 + var cepsNull int = 1 + if len(eps) > 0 { + cepsVal = eps[0] + cepsNull = 0 + } + lib.AtgLogitOut(ptr, out.ctensor, ts.ctensor, cepsVal, cepsNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Logspace(start *Scalar, end *Scalar, steps []int64, base float64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cstepsVal int64 = 0 
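+ // An empty `steps` slice leaves the null flag set, so the value is forwarded to
+ // Libtorch as an unset optional rather than a concrete step count.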
+ var cstepsNull int = 1 + if len(steps) > 0 { + cstepsVal = steps[0] + cstepsNull = 0 + } + lib.AtgLogspace(ptr, start.cscalar, end.cscalar, cstepsVal, cstepsNull, base, optionsKind.CInt(), optionsDevice.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func LogspaceOut(out *Tensor, start *Scalar, end *Scalar, steps []int64, base float64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cstepsVal int64 = 0 + var cstepsNull int = 1 + if len(steps) > 0 { + cstepsVal = steps[0] + cstepsNull = 0 + } + lib.AtgLogspaceOut(ptr, out.ctensor, start.cscalar, end.cscalar, cstepsVal, cstepsNull, base) if err = TorchErr(); err != nil { return retVal, err } @@ -8534,6 +11518,18 @@ func (ts *Tensor) MaskedSelect(mask *Tensor, del bool) (retVal *Tensor, err erro return retVal, err } +func MaskedSelectBackward(grad *Tensor, input *Tensor, mask *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaskedSelectBackward(ptr, grad.ctensor, input.ctensor, mask.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) MaskedSelectOut(out *Tensor, mask *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -8579,6 +11575,36 @@ func (ts *Tensor) MatmulOut(out *Tensor, other *Tensor, del bool) (retVal *Tenso return retVal, err } +func (ts *Tensor) MatrixExp(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatrixExp(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MatrixExpBackward(grad *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMatrixExpBackward(ptr, ts.ctensor, grad.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) MatrixPower(n int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -8930,17 +11956,28 @@ func (ts *Tensor) MaxUnpool3dOut(out *Tensor, indices *Tensor, outputSize []int6 return retVal, err } -func (ts *Tensor) MaxValues(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) Maximum(other *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) + lib.AtgMaximum(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err } - lib.AtgMaxValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MaximumOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMaximumOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { return retVal, err } @@ -9062,17 +12099,28 @@ func (ts *Tensor) MinOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, return retVal, err } -func (ts *Tensor) MinValues(dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) Minimum(other 
*Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - ckeepdim := int32(0) - if keepdim { - ckeepdim = int32(1) + lib.AtgMinimum(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err } - lib.AtgMinValues(ptr, ts.ctensor, dim, len(dim), ckeepdim) + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) MinimumOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMinimumOut(ptr, out.ctensor, ts.ctensor, other.ctensor) if err = TorchErr(); err != nil { return retVal, err } @@ -9368,6 +12416,25 @@ func (ts *Tensor) MkldnnMaxPool2d(kernelSize []int64, stride []int64, padding [] return retVal, err } +func (ts *Tensor) MkldnnMaxPool3d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgMkldnnMaxPool3d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -9383,6 +12450,21 @@ func (ts *Tensor) MkldnnReorderConv2dWeight(padding []int64, stride []int64, dil return retVal, err } +func (ts *Tensor) MkldnnReorderConv3dWeight(padding []int64, stride []int64, dilation []int64, groups int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMkldnnReorderConv3dWeight(ptr, ts.ctensor, padding, len(padding), stride, len(stride), dilation, len(dilation), groups) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Mm(mat2 *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -9413,6 +12495,36 @@ func (ts *Tensor) MmOut(out *Tensor, mat2 *Tensor, del bool) (retVal *Tensor, er return retVal, err } +func (ts *Tensor) Movedim(source []int64, destination []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMovedim(ptr, ts.ctensor, source, len(source), destination, len(destination)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Movedim1(source int64, destination int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMovedim1(ptr, ts.ctensor, source, destination) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) MseLoss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -9668,6 +12780,73 @@ func (ts *Tensor) MultinomialOut(out *Tensor, numSamples int64, replacement bool return retVal, err } +func (ts *Tensor) Multiply(other *Tensor, 
del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiply(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Multiply1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiply1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Multiply_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiply_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Multiply1_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiply1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) MultiplyOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgMultiplyOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Mv(vec *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -9724,6 +12903,159 @@ func (ts *Tensor) Mvlgamma_(p int64) (err error) { return err } +func (ts *Tensor) Nanquantile(q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNanquantile(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Nanquantile1(q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNanquantile1(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NanquantileOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNanquantileOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NanquantileOut1(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err 
error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNanquantileOut1(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Nansum(dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNansum(ptr, ts.ctensor, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Nansum1(dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNansum1(ptr, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NansumOut(out *Tensor, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNansumOut(ptr, out.ctensor, ts.ctensor, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Narrow(dim int64, start int64, length int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -9784,6 +13116,25 @@ func (ts *Tensor) NativeNorm(del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) NativeNorm1(p *Scalar, dim []int64, keepdim bool, dtype gotch.DType, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgNativeNorm1(ptr, ts.ctensor, p.cscalar, dim, len(dim), ckeepdim, dtype.CInt()) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Ne(other *Scalar, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -9907,6 +13258,47 @@ func (ts *Tensor) NegOut(out *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Negative(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegative(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Negative_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegative_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) NegativeOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNegativeOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { 
+ return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) NewEmpty(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -9952,6 +13344,47 @@ func (ts *Tensor) NewZeros(size []int64, optionsKind gotch.DType, optionsDevice return retVal, err } +func (ts *Tensor) Nextafter(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNextafter(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Nextafter_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNextafter_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) NextafterOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNextafterOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) NllLoss(target *Tensor, weight *Tensor, reduction int64, ignoreIndex int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -10279,6 +13712,88 @@ func NormalOut3(out *Tensor, mean float64, std float64, size []int64) (retVal *T return retVal, err } +func (ts *Tensor) NotEqual(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNotEqual(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NotEqual1(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNotEqual1(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NotEqual_(other *Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNotEqual_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) NotEqual1_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNotEqual1_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) NotEqualOut(out *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNotEqualOut(ptr, out.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) NotEqualOut1(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgNotEqualOut1(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return 
retVal, err +} + func (ts *Tensor) NuclearNorm(keepdim bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -10500,6 +14015,36 @@ func (ts *Tensor) OrmqrOut(out *Tensor, input2 *Tensor, input3 *Tensor, left boo return retVal, err } +func (ts *Tensor) Outer(vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOuter(ptr, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) OuterOut(out *Tensor, vec2 *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgOuterOut(ptr, out.ctensor, ts.ctensor, vec2.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func PairwiseDistance(x1 *Tensor, x2 *Tensor, p float64, eps float64, keepdim bool) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -10626,6 +14171,30 @@ func PoissonNllLoss(input *Tensor, target *Tensor, logInput bool, full bool, eps return retVal, err } +func Polar(abs *Tensor, angle *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolar(ptr, abs.ctensor, angle.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func PolarOut(out *Tensor, abs *Tensor, angle *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgPolarOut(ptr, out.ctensor, abs.ctensor, angle.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Polygamma(n int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -10886,6 +14455,106 @@ func (ts *Tensor) QPerChannelZeroPoints(del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Quantile(q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgQuantile(ptr, ts.ctensor, q, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Quantile1(q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgQuantile1(ptr, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) QuantileOut(out *Tensor, q float64, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 
0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgQuantileOut(ptr, out.ctensor, ts.ctensor, q, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) QuantileOut1(out *Tensor, q *Tensor, dim []int64, keepdim bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgQuantileOut1(ptr, out.ctensor, ts.ctensor, q.ctensor, cdimVal, cdimNull, ckeepdim) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) QuantizePerChannel(scales *Tensor, zeroPoints *Tensor, axis int64, dtype gotch.DType, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -10940,6 +14609,25 @@ func QuantizedGruCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, bIh * return retVal, err } +func (ts *Tensor) QuantizedMaxPool1d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + cceilMode := int32(0) + if ceilMode { + cceilMode = int32(1) + } + lib.AtgQuantizedMaxPool1d(ptr, ts.ctensor, kernelSize, len(kernelSize), stride, len(stride), padding, len(padding), dilation, len(dilation), cceilMode) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) QuantizedMaxPool2d(kernelSize []int64, stride []int64, padding []int64, dilation []int64, ceilMode bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -10983,6 +14671,47 @@ func QuantizedRnnTanhCell(input *Tensor, hx *Tensor, wIh *Tensor, wHh *Tensor, b return retVal, err } +func (ts *Tensor) Rad2deg(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRad2deg(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Rad2deg_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRad2deg_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Rad2degOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgRad2degOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func Rand(size []int64, optionsKind gotch.DType, optionsDevice gotch.Device) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -11161,10 +14890,16 @@ func (ts *Tensor) Random1_(to int64) (err error) { return err } -func (ts *Tensor) Random2(from int64, to int64) (err error) { +func (ts *Tensor) Random2(from int64, to []int64) (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgRandom2(ptr, ts.ctensor, from, to) + var ctoVal int64 = 0 + var ctoNull int = 1 + if len(to) > 0 { + ctoVal = to[0] + ctoNull = 0 + } + 
lib.AtgRandom2(ptr, ts.ctensor, from, ctoVal, ctoNull) if err = TorchErr(); err != nil { return err } @@ -11584,13 +15319,19 @@ func RepeatInterleave(repeats *Tensor) (retVal *Tensor, err error) { return retVal, err } -func (ts *Tensor) RepeatInterleave1(repeats *Tensor, dim int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) RepeatInterleave1(repeats *Tensor, dim []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgRepeatInterleave1(ptr, ts.ctensor, repeats.ctensor, dim) + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgRepeatInterleave1(ptr, ts.ctensor, repeats.ctensor, cdimVal, cdimNull) if err = TorchErr(); err != nil { return retVal, err } @@ -11599,13 +15340,19 @@ func (ts *Tensor) RepeatInterleave1(repeats *Tensor, dim int64, del bool) (retVa return retVal, err } -func (ts *Tensor) RepeatInterleave2(repeats int64, dim int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) RepeatInterleave2(repeats int64, dim []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgRepeatInterleave2(ptr, ts.ctensor, repeats, dim) + var cdimVal int64 = 0 + var cdimNull int = 1 + if len(dim) > 0 { + cdimVal = dim[0] + cdimNull = 0 + } + lib.AtgRepeatInterleave2(ptr, ts.ctensor, repeats, cdimVal, cdimNull) if err = TorchErr(); err != nil { return retVal, err } @@ -12224,6 +15971,28 @@ func (ts *Tensor) Scatter1_(dim int64, index *Tensor, value *Scalar) (err error) return err } +func (ts *Tensor) Scatter2(dim int64, index *Tensor, src *Tensor, reduce string) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter2(ptr, ts.ctensor, dim, index.ctensor, src.ctensor, reduce) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Scatter3(dim int64, index *Tensor, value *Scalar, reduce string) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgScatter3(ptr, ts.ctensor, dim, index.ctensor, value.cscalar, reduce) + if err = TorchErr(); err != nil { + return err + } + + return err +} + func (ts *Tensor) ScatterAdd(dim int64, index *Tensor, src *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -12250,6 +16019,72 @@ func (ts *Tensor) ScatterAdd_(dim int64, index *Tensor, src *Tensor) (err error) return err } +func (ts *Tensor) Searchsorted(sortedSequence *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { + coutInt32 = int32(1) + } + cright := int32(0) + if right { + cright = int32(1) + } + lib.AtgSearchsorted(ptr, sortedSequence.ctensor, ts.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Searchsorted1(sortedSequence *Tensor, selfScalar *Scalar, outInt32 bool, right bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { + coutInt32 = int32(1) + } + cright := int32(0) + if right { + cright = int32(1) + } + lib.AtgSearchsorted1(ptr, sortedSequence.ctensor, selfScalar.cscalar, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return 
retVal, err +} + +func (ts *Tensor) SearchsortedOut(out *Tensor, sortedSequence *Tensor, outInt32 bool, right bool, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + coutInt32 := int32(0) + if outInt32 { + coutInt32 = int32(1) + } + cright := int32(0) + if right { + cright = int32(1) + } + lib.AtgSearchsortedOut(ptr, out.ctensor, sortedSequence.ctensor, ts.ctensor, coutInt32, cright) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Select(dim int64, index int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -12265,6 +16100,18 @@ func (ts *Tensor) Select(dim int64, index int64, del bool) (retVal *Tensor, err return retVal, err } +func SelectBackward(grad *Tensor, inputSizes []int64, dim int64, index int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSelectBackward(ptr, grad.ctensor, inputSizes, len(inputSizes), dim, index) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Selu(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -12332,6 +16179,47 @@ func (ts *Tensor) SetRequiresGrad(r bool, del bool) (retVal *Tensor, err error) return retVal, err } +func (ts *Tensor) Sgn(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSgn(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Sgn_() (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSgn_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SgnOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSgnOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Sigmoid(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -12438,6 +16326,92 @@ func (ts *Tensor) SignOut(out *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Signbit(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSignbit(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SignbitOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSignbitOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Silu(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSilu(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Silu_() (err error) { + ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSilu_(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SiluBackward(gradOutput *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSiluBackward(ptr, gradOutput.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) SiluOut(out *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSiluOut(ptr, out.ctensor, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Sin(del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -12535,6 +16509,18 @@ func (ts *Tensor) Slice(dim int64, start int64, end int64, step int64, del bool) return retVal, err } +func SliceBackward(grad *Tensor, inputSizes []int64, dim int64, start int64, end int64, step int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSliceBackward(ptr, grad.ctensor, inputSizes, len(inputSizes), dim, start, end, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) SlowConv3d(weight *Tensor, kernelSize []int64, bias *Tensor, stride []int64, padding []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -12670,13 +16656,13 @@ func (ts *Tensor) Smm(mat2 *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } -func (ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction) + lib.AtgSmoothL1Loss(ptr, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { return retVal, err } @@ -12685,13 +16671,13 @@ func (ts *Tensor) SmoothL1Loss(target *Tensor, reduction int64, del bool) (retVa return retVal, err } -func (ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + lib.AtgSmoothL1LossBackward(ptr, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { return retVal, err } @@ -12700,13 +16686,13 @@ func (ts *Tensor) SmoothL1LossBackward(gradOutput *Tensor, target *Tensor, reduc return retVal, err } -func (ts *Tensor) SmoothL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) SmoothL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := 
(*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSmoothL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction) + lib.AtgSmoothL1LossBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { return retVal, err } @@ -12715,13 +16701,13 @@ func (ts *Tensor) SmoothL1LossBackwardOut(gradInput *Tensor, gradOutput *Tensor, return retVal, err } -func (ts *Tensor) SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) SmoothL1LossOut(out *Tensor, target *Tensor, reduction int64, beta float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction) + lib.AtgSmoothL1LossOut(ptr, out.ctensor, ts.ctensor, target.ctensor, reduction, beta) if err = TorchErr(); err != nil { return retVal, err } @@ -13244,12 +17230,24 @@ func (ts *Tensor) StdOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, return retVal, err } -func (ts *Tensor) Stft(nFft int64, hopLength int64, winLength int64, window *Tensor, normalized bool, onesided bool, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) Stft(nFft int64, hopLength []int64, winLength []int64, window *Tensor, normalized bool, onesided bool, returnComplex bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + var chopLengthVal int64 = 0 + var chopLengthNull int = 1 + if len(hopLength) > 0 { + chopLengthVal = hopLength[0] + chopLengthNull = 0 + } + var cwinLengthVal int64 = 0 + var cwinLengthNull int = 1 + if len(winLength) > 0 { + cwinLengthVal = winLength[0] + cwinLengthNull = 0 + } cnormalized := int32(0) if normalized { cnormalized = int32(1) @@ -13258,7 +17256,11 @@ func (ts *Tensor) Stft(nFft int64, hopLength int64, winLength int64, window *Ten if onesided { conesided = int32(1) } - lib.AtgStft(ptr, ts.ctensor, nFft, hopLength, winLength, window.ctensor, cnormalized, conesided) + creturnComplex := int32(0) + if returnComplex { + creturnComplex = int32(1) + } + lib.AtgStft(ptr, ts.ctensor, nFft, chopLengthVal, chopLengthNull, cwinLengthVal, cwinLengthNull, window.ctensor, cnormalized, conesided, creturnComplex) if err = TorchErr(); err != nil { return retVal, err } @@ -13334,6 +17336,73 @@ func (ts *Tensor) SubOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, return retVal, err } +func (ts *Tensor) Subtract(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtract(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Subtract1(other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtract1(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Subtract_(other *Tensor) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtract_(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) Subtract1_(other 
*Scalar) (err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtract1_(ptr, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return err + } + + return err +} + +func (ts *Tensor) SubtractOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgSubtractOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Sum(dtype gotch.DType, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -13443,6 +17512,18 @@ func (ts *Tensor) Take(index *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func TakeBackward(grad *Tensor, input *Tensor, index *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTakeBackward(ptr, grad.ctensor, input.ctensor, index.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) TakeOut(out *Tensor, index *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -13856,6 +17937,18 @@ func (ts *Tensor) Trace(del bool) (retVal *Tensor, err error) { return retVal, err } +func TraceBackward(grad *Tensor, sizes []int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgTraceBackward(ptr, grad.ctensor, sizes, len(sizes)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Transpose(dim0 int64, dim1 int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -14151,6 +18244,21 @@ func (ts *Tensor) TypeAs(other *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) Unflatten(dim int64, sizes []int64, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnflatten(ptr, ts.ctensor, dim, sizes, len(sizes)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -14166,6 +18274,18 @@ func (ts *Tensor) Unfold(dimension int64, size int64, step int64, del bool) (ret return retVal, err } +func UnfoldBackward(gradIn *Tensor, inputSizes []int64, dim int64, size int64, step int64) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgUnfoldBackward(ptr, gradIn.ctensor, inputSizes, len(inputSizes), dim, size, step) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Uniform_(from float64, to float64) (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) @@ -14203,7 +18323,7 @@ func (ts *Tensor) Unsqueeze_(dim int64) (err error) { return err } -func (ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14213,7 +18333,19 @@ func 
(ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scale if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBicubic2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14222,14 +18354,26 @@ func (ts *Tensor) UpsampleBicubic2d(outputSize []int64, alignCorners bool, scale return retVal, err } -func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBicubic2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14238,14 +18382,26 @@ func UpsampleBicubic2dBackward(gradOutput *Tensor, outputSize []int64, inputSize return retVal, err } -func UpsampleBicubic2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleBicubic2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBicubic2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBicubic2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14254,7 +18410,7 @@ func UpsampleBicubic2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputS return retVal, err } -func (ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) 
(retVal *Tensor, err error) { +func (ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14264,7 +18420,19 @@ func (ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCor if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBicubic2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14273,7 +18441,7 @@ func (ts *Tensor) UpsampleBicubic2dOut(out *Tensor, outputSize []int64, alignCor return retVal, err } -func (ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14283,7 +18451,19 @@ func (ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scal if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBilinear2d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14292,14 +18472,26 @@ func (ts *Tensor) UpsampleBilinear2d(outputSize []int64, alignCorners bool, scal return retVal, err } -func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBilinear2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14308,14 +18500,26 @@ func UpsampleBilinear2dBackward(gradOutput *Tensor, outputSize []int64, inputSiz return retVal, err 
} -func UpsampleBilinear2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleBilinear2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBilinear2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBilinear2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14324,7 +18528,7 @@ func UpsampleBilinear2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, output return retVal, err } -func (ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14334,7 +18538,19 @@ func (ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCo if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleBilinear2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14343,7 +18559,7 @@ func (ts *Tensor) UpsampleBilinear2dOut(out *Tensor, outputSize []int64, alignCo return retVal, err } -func (ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14353,7 +18569,13 @@ func (ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleLinear1d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14362,14 +18584,20 @@ func (ts *Tensor) UpsampleLinear1d(outputSize []int64, alignCorners bool, scales return retVal, 
err } -func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor, err error) { +func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleLinear1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14378,14 +18606,20 @@ func UpsampleLinear1dBackward(gradOutput *Tensor, outputSize []int64, inputSize return retVal, err } -func UpsampleLinear1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales float64) (retVal *Tensor, err error) { +func UpsampleLinear1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scales []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleLinear1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleLinear1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14394,7 +18628,7 @@ func UpsampleLinear1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSi return retVal, err } -func (ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorners bool, scales []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14404,7 +18638,13 @@ func (ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorn if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleLinear1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14413,13 +18653,19 @@ func (ts *Tensor) UpsampleLinear1dOut(out *Tensor, outputSize []int64, alignCorn return retVal, err } -func (ts *Tensor) UpsampleNearest1d(outputSize []int64, scales float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleNearest1d(outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest1d(ptr, ts.ctensor, 
outputSize, len(outputSize), scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleNearest1d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14428,10 +18674,16 @@ func (ts *Tensor) UpsampleNearest1d(outputSize []int64, scales float64, del bool return retVal, err } -func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor, err error) { +func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleNearest1dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14440,10 +18692,16 @@ func UpsampleNearest1dBackward(gradOutput *Tensor, outputSize []int64, inputSize return retVal, err } -func UpsampleNearest1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales float64) (retVal *Tensor, err error) { +func UpsampleNearest1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scales []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleNearest1dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14452,13 +18710,19 @@ func UpsampleNearest1dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputS return retVal, err } -func (ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scales) + var cscalesVal float64 = 0.0 + var cscalesNull int = 1 + if len(scales) > 0 { + cscalesVal = scales[0] + cscalesNull = 0 + } + lib.AtgUpsampleNearest1dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesVal, cscalesNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14467,13 +18731,25 @@ func (ts *Tensor) UpsampleNearest1dOut(out *Tensor, outputSize []int64, scales f return retVal, err } -func (ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - 
lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest2d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14482,10 +18758,22 @@ func (ts *Tensor) UpsampleNearest2d(outputSize []int64, scalesH float64, scalesW return retVal, err } -func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest2dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14494,10 +18782,22 @@ func UpsampleNearest2dBackward(gradOutput *Tensor, outputSize []int64, inputSize return retVal, err } -func UpsampleNearest2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleNearest2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest2dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14506,13 +18806,25 @@ func UpsampleNearest2dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputS return retVal, err } -func (ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesH, scalesW) + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 
0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest2dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14521,13 +18833,31 @@ func (ts *Tensor) UpsampleNearest2dOut(out *Tensor, outputSize []int64, scalesH return retVal, err } -func (ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest3d(ptr, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14536,10 +18866,28 @@ func (ts *Tensor) UpsampleNearest3d(outputSize []int64, scalesD float64, scalesH return retVal, err } -func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14548,10 +18896,28 @@ func UpsampleNearest3dBackward(gradOutput *Tensor, outputSize []int64, inputSize return retVal, err } -func UpsampleNearest3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleNearest3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest3dBackwardOut(ptr, 
gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14560,13 +18926,31 @@ func UpsampleNearest3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputS return retVal, err } -func (ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) - lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleNearest3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14575,7 +18959,7 @@ func (ts *Tensor) UpsampleNearest3dOut(out *Tensor, outputSize []int64, scalesD return retVal, err } -func (ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14585,7 +18969,25 @@ func (ts *Tensor) UpsampleTrilinear3d(outputSize []int64, alignCorners bool, sca if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleTrilinear3d(ptr, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14594,14 +18996,32 @@ func (ts *Tensor) UpsampleTrilinear3d(outputSize 
[]int64, alignCorners bool, sca return retVal, err } -func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleTrilinear3dBackward(ptr, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14610,14 +19030,32 @@ func UpsampleTrilinear3dBackward(gradOutput *Tensor, outputSize []int64, inputSi return retVal, err } -func UpsampleTrilinear3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64) (retVal *Tensor, err error) { +func UpsampleTrilinear3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outputSize []int64, inputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64) (retVal *Tensor, err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) calignCorners := int32(0) if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleTrilinear3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleTrilinear3dBackwardOut(ptr, gradInput.ctensor, gradOutput.ctensor, outputSize, len(outputSize), inputSize, len(inputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) if err = TorchErr(); err != nil { return retVal, err } @@ -14626,7 +19064,7 @@ func UpsampleTrilinear3dBackwardOut(gradInput *Tensor, gradOutput *Tensor, outpu return retVal, err } -func (ts *Tensor) UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD float64, scalesH float64, scalesW float64, del bool) (retVal *Tensor, err error) { +func (ts *Tensor) UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignCorners bool, scalesD []float64, scalesH []float64, scalesW []float64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() } @@ -14636,7 +19074,41 @@ func (ts *Tensor) 
UpsampleTrilinear3dOut(out *Tensor, outputSize []int64, alignC if alignCorners { calignCorners = int32(1) } - lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, scalesD, scalesH, scalesW) + var cscalesDVal float64 = 0.0 + var cscalesDNull int = 1 + if len(scalesD) > 0 { + cscalesDVal = scalesD[0] + cscalesDNull = 0 + } + var cscalesHVal float64 = 0.0 + var cscalesHNull int = 1 + if len(scalesH) > 0 { + cscalesHVal = scalesH[0] + cscalesHNull = 0 + } + var cscalesWVal float64 = 0.0 + var cscalesWNull int = 1 + if len(scalesW) > 0 { + cscalesWVal = scalesW[0] + cscalesWNull = 0 + } + lib.AtgUpsampleTrilinear3dOut(ptr, out.ctensor, ts.ctensor, outputSize, len(outputSize), calignCorners, cscalesDVal, cscalesDNull, cscalesHVal, cscalesHNull, cscalesWVal, cscalesWNull) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func ValueSelectingReductionBackward(grad *Tensor, dim int64, indices *Tensor, sizes []int64, keepdim bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + ckeepdim := int32(0) + if keepdim { + ckeepdim = int32(1) + } + lib.AtgValueSelectingReductionBackward(ptr, grad.ctensor, dim, indices.ctensor, sizes, len(sizes), ckeepdim) if err = TorchErr(); err != nil { return retVal, err } @@ -14660,6 +19132,28 @@ func (ts *Tensor) Values(del bool) (retVal *Tensor, err error) { return retVal, err } +func Vander(x *Tensor, n []int64, increasing bool) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var cnVal int64 = 0 + var cnNull int = 1 + if len(n) > 0 { + cnVal = n[0] + cnNull = 0 + } + cincreasing := int32(0) + if increasing { + cincreasing = int32(1) + } + lib.AtgVander(ptr, x.ctensor, cnVal, cnNull, cincreasing) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Var(unbiased bool, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -14725,6 +19219,36 @@ func (ts *Tensor) VarOut(out *Tensor, dim []int64, unbiased bool, keepdim bool, return retVal, err } +func (ts *Tensor) Vdot(other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgVdot(ptr, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) VdotOut(out *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgVdotOut(ptr, out.ctensor, ts.ctensor, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) View(size []int64, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -14755,6 +19279,68 @@ func (ts *Tensor) ViewAs(other *Tensor, del bool) (retVal *Tensor, err error) { return retVal, err } +func (ts *Tensor) ViewAsComplex(del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgViewAsComplex(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) ViewAsReal(del bool) (retVal *Tensor, err error) { + if del 
{ + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgViewAsReal(ptr, ts.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Vstack(tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgVstack(ptr, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func VstackOut(out *Tensor, tensors []Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + var ctensors []lib.Ctensor + for _, t := range tensors { + ctensors = append(ctensors, t.ctensor) + } + lib.AtgVstackOut(ptr, out.ctensor, ctensors, len(ctensors)) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Where1(condition *Tensor, other *Tensor, del bool) (retVal *Tensor, err error) { if del { defer ts.MustDrop() @@ -14770,6 +19356,45 @@ func (ts *Tensor) Where1(condition *Tensor, other *Tensor, del bool) (retVal *Te return retVal, err } +func Where2(condition *Tensor, selfScalar *Scalar, other *Tensor) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgWhere2(ptr, condition.ctensor, selfScalar.cscalar, other.ctensor) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func (ts *Tensor) Where3(condition *Tensor, other *Scalar, del bool) (retVal *Tensor, err error) { + if del { + defer ts.MustDrop() + } + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgWhere3(ptr, condition.ctensor, ts.ctensor, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + +func Where4(condition *Tensor, selfScalar *Scalar, other *Scalar) (retVal *Tensor, err error) { + ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) + + lib.AtgWhere4(ptr, condition.ctensor, selfScalar.cscalar, other.cscalar) + if err = TorchErr(); err != nil { + return retVal, err + } + retVal = &Tensor{ctensor: *ptr} + + return retVal, err +} + func (ts *Tensor) Zero_() (err error) { ptr := (*lib.Ctensor)(unsafe.Pointer(C.malloc(0))) diff --git a/tensor/tensor.go b/tensor/tensor.go index 228c020..590105a 100644 --- a/tensor/tensor.go +++ b/tensor/tensor.go @@ -1171,7 +1171,7 @@ func (ts *Tensor) Swish() *Tensor { } func (ts *Tensor) AvgPool2DDefault(ksize int64, del bool) *Tensor { - return ts.MustAvgPool2d([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, false, true, 1, del) + return ts.MustAvgPool2d([]int64{ksize, ksize}, []int64{ksize, ksize}, []int64{0, 0}, false, true, []int64{1}, del) } // SaveMultiNew saves a slice of named tensors to the given file path. 
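The `tensor/tensor.go` hunk above shows the new convention for nullable scalar arguments in the v1.7.0 bindings: an optional `int64`/`double` is now passed as a slice, where `nil` or an empty slice means "not set" and a one-element slice carries the value (the generated wrappers translate `len(...) == 0` into the `*_null` flag of the C API). The sketch below is illustrative only; the `MustAvgPool2d` call mirrors the argument order used in this diff, while the `MustOnes`/`MustSize` helpers are assumed from gotch's existing tensor API.

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// Dummy NCHW input; only the shapes matter for this demonstration.
	xs := ts.MustOnes([]int64{1, 3, 8, 8}, gotch.Float, gotch.CPU)

	// No divisor override: a nil slice has length 0, which the generated
	// wrapper converts into the "null" flag of the underlying C function.
	plain := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, nil, false)

	// Explicit divisor override, matching the []int64{9} used in vision/inception.go.
	overridden := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, []int64{9}, false)

	fmt.Println(plain.MustSize(), overridden.MustSize())
}
```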
diff --git a/vision/densenet.go b/vision/densenet.go
index a8eca2a..3fc128a 100644
--- a/vision/densenet.go
+++ b/vision/densenet.go
@@ -103,7 +103,7 @@ func densenet(p *nn.Path, cIn, cOut, bnSize int64, blockConfig []int64, growth i
 
 	seq.AddFn(nn.NewFunc(func(xs *ts.Tensor) *ts.Tensor {
 		tmp1 := xs.MustRelu(false)
-		tmp2 := tmp1.MustAvgPool2d([]int64{7, 7}, []int64{1, 1}, []int64{0, 0}, false, true, 1, true)
+		tmp2 := tmp1.MustAvgPool2d([]int64{7, 7}, []int64{1, 1}, []int64{0, 0}, false, true, []int64{1}, true)
 		res := tmp2.FlatView()
 		tmp2.MustDrop()
 		return res
diff --git a/vision/inception.go b/vision/inception.go
index b646f0d..5790d30 100644
--- a/vision/inception.go
+++ b/vision/inception.go
@@ -78,7 +78,7 @@ func inceptionA(p *nn.Path, cIn, cPool int64) ts.ModuleT {
 	b3Ts := b3Tmp2.ApplyT(b33, train)
 	b3Tmp2.MustDrop()
 
-	bpoolTmp := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false)
+	bpoolTmp := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, []int64{9}, false)
 	bpoolTs := bpoolTmp.ApplyT(bpool, train)
 
 	res := ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *b3Ts, *bpoolTs}, 1)
@@ -145,7 +145,7 @@ func inceptionC(p *nn.Path, cIn int64, c7 int64) ts.ModuleT {
 	b3Ts := b3Tmp4.ApplyT(b35, train)
 	b3Tmp4.MustDrop()
 
-	bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false)
+	bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, []int64{9}, false)
 	bpoolTs := bpTmp1.ApplyT(bpool, train)
 
 	return ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *b3Ts, *bpoolTs}, 1)
@@ -211,7 +211,7 @@ func inceptionE(p *nn.Path, cIn int64) ts.ModuleT {
 	b3bTs := b3Tmp2.ApplyT(b33b, train)
 	b3Ts := ts.MustCat([]ts.Tensor{*b3aTs, *b3bTs}, 1)
 
-	bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, 9, false)
+	bpTmp1 := xs.MustAvgPool2d([]int64{3, 3}, []int64{1, 1}, []int64{1, 1}, false, true, []int64{9}, false)
 	bpoolTs := bpTmp1.ApplyT(bpool, train)
 
 	return ts.MustCat([]ts.Tensor{*b1Ts, *b2Ts, *b3Ts, *bpoolTs}, 1)
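The same optional-argument convention applies to the upsampling functions and to new entries such as `Vander`: `scalesH`/`scalesW`/`scalesD` and `n` are `[]float64`/`[]int64` slices whose emptiness encodes "no value". Below is a minimal usage sketch, assuming gotch's usual `MustOnes`/`MustOfSlice`/`MustSize` helpers; the `UpsampleNearest2d` and `Vander` signatures are taken from the generated code in this diff.

```go
package main

import (
	"fmt"

	"github.com/sugarme/gotch"
	ts "github.com/sugarme/gotch/tensor"
)

func main() {
	// 1x1x4x4 input for nearest-neighbour upsampling.
	xs := ts.MustOnes([]int64{1, 1, 4, 4}, gotch.Float, gotch.CPU)

	// nil scales (length 0): libtorch derives the scale factors from outputSize.
	up, err := xs.UpsampleNearest2d([]int64{8, 8}, nil, nil, false)
	if err != nil {
		panic(err)
	}

	// Explicit scale factors via one-element slices.
	explicit, err := xs.UpsampleNearest2d([]int64{8, 8}, []float64{2.0}, []float64{2.0}, false)
	if err != nil {
		panic(err)
	}

	// Vander's n is optional in the same way: []int64{4} requests 4 columns,
	// while nil falls back to the input length (PyTorch's default for N).
	x := ts.MustOfSlice([]float64{1, 2, 3})
	vander, err := ts.Vander(x, []int64{4}, false)
	if err != nil {
		panic(err)
	}

	fmt.Println(up.MustSize(), explicit.MustSize(), vander.MustSize())
}
```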